ESPResSo
Extensible Simulation Package for Research on Soft Matter Systems
Loading...
Searching...
No Matches
lattice_boltzmann/generated_kernels/myintrin.h
Go to the documentation of this file.
1/*
2Copyright 2019-2021, Michael Kuron.
3
4Redistribution and use in source and binary forms, with or without
5modification, are permitted provided that the following conditions are
6met:
7
8* Redistributions of source code must retain the above copyright
9 notice, this list of conditions, and the following disclaimer.
10
11* Redistributions in binary form must reproduce the above copyright
12 notice, this list of conditions, and the following disclaimer in the
13 documentation and/or other materials provided with the distribution.
14
15* Neither the name of the copyright holder nor the names of its
16 contributors may be used to endorse or promote products derived from
17 this software without specific prior written permission.
18
19THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30*/
31
32/**
33 * @file
34 * Philox counter-based RNG utility functions.
35 * Adapted from the pystencils source file
36 * https://i10git.cs.fau.de/pycodegen/pystencils/-/blob/39c214af/pystencils/include/myintrin.h
37 */
38
39#pragma once
40
41#if defined(__SSE2__) || defined(_MSC_VER)
43#ifdef __AVX512VL__
44 return _mm_cvtepu32_ps(v);
45#else
50 return _mm_add_ps(_mm_add_ps(v2f, v2f), v1f);
51#endif
52}
53
55 __m128i &R3) {
56 __m128i T0, T1, T2, T3;
65}
66#endif
67
68#if defined(__SSE4_1__) || defined(_MSC_VER)
69#if !defined(__AVX512VL__) && defined(__GNUC__) && __GNUC__ >= 5 && \
70 !defined(__clang__)
71__attribute__((optimize("no-associative-math")))
72#endif
74_my_cvtepu64_pd(const __m128i x) {
75#ifdef __AVX512VL__
76 return _mm_cvtepu64_pd(x);
77#elif defined(__clang__)
80#else
81 __m128i xH = _mm_srli_epi64(x, 32);
83 xH, _mm_castpd_si128(_mm_set1_pd(19342813113834066795298816.))); // 2^84
85 x, _mm_castpd_si128(_mm_set1_pd(0x0010000000000000)), 0xcc); // 2^52
86 __m128d f =
88 _mm_set1_pd(19342813118337666422669312.)); // 2^84 + 2^52
89 return _mm_add_pd(f, _mm_castsi128_pd(xL));
90#endif
91}
92#endif
93
94#ifdef __AVX2__
96#if (!defined(__GNUC__) || __GNUC__ >= 8) || defined(__clang__)
97 return _mm256_set_m128i(hi, lo);
98#else
100#endif
101}
102
104#if (!defined(__GNUC__) || __GNUC__ >= 8) || defined(__clang__)
105 return _mm256_set_m128d(hi, lo);
106#else
108#endif
109}
110
112#ifdef __AVX512VL__
113 return _mm256_cvtepu32_ps(v);
114#else
120#endif
121}
122
123#if !defined(__AVX512VL__) && defined(__GNUC__) && __GNUC__ >= 5 && \
124 !defined(__clang__)
125__attribute__((optimize("no-associative-math")))
126#endif
129#ifdef __AVX512VL__
130 return _mm256_cvtepu64_pd(x);
131#elif defined(__clang__)
134#else
137 19342813113834066795298816.))); // 2^84
139 x, _mm256_castpd_si256(_mm256_set1_pd(0x0010000000000000)),
140 0xcc); // 2^52
143 _mm256_set1_pd(19342813118337666422669312.)); // 2^84 + 2^52
145#endif
146}
147#endif
148
149#ifdef __AVX512F__
151 __m128i a) {
152 return _mm512_inserti32x4(
154 2),
155 d, 3);
156}
157
160}
161#endif
cudaStream_t stream[1]
CUDA streams used to order and overlap asynchronous GPU operations (kernel launches and memory copies).