// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

#include <cuda.h>
#include "ds_kernel_utils.h"
/////////////////////////////// Memory Access Utils ///////////////////////////////

namespace mem_access {

enum class LoadPolicy {
    CacheAll,       // Cache at all levels
    CacheGlobal,    // Cache at L2 only
    CacheStreaming  // Cache with evict first policy
};

enum class StorePolicy {
    Writeback,      // Cache in L1, write-back on eviction
    CacheGlobal,    // Bypass L1, write-back on eviction
    CacheStreaming  // Allocate cache line with evict first policy
};
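
// These policies map onto the PTX cache qualifiers used in the specializations below:
// loads use .ca, .cg, and .cs respectively; stores use .wb, .cg, and .cs.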

template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
__device__ __forceinline__ void load_global(void* dst, const void* src);

template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
__device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access);

// Shared accesses have no cache policy
template <int AccessSize>
__device__ __forceinline__ void load_shared(void* dst, const void* src);

template <int AccessSize>
__device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access);

template <int AccessSize, StorePolicy policy = StorePolicy::Writeback>
__device__ __forceinline__ void store_global(void* dst, const void* src);

// Shared accesses have no cache policy
template <int AccessSize>
__device__ __forceinline__ void store_shared(void* dst, const void* src);

#ifdef ASYNC_COPY_AVAILABLE
template <int AccessSize>
__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl);

template <int AccessSize>
__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate);

template <int AccessSize>
__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate);

__device__ __forceinline__ void memcpy_async_fence();

template <int stages>
__device__ __forceinline__ void memcpy_async_wait();

template <int stages>
__device__ __forceinline__ void tail_complete_wait(int remaining_stages);
#endif
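
// Example (illustrative sketch): a bounds-checked, 16-byte vectorized copy using the
// predicated load. The kernel name and parameters (copy_kernel, out, in, n_elems) are
// placeholders, not part of this API.
//
//   __global__ void copy_kernel(float* out, const float* in, int n_elems)
//   {
//       constexpr int vals_per_access = 16 / sizeof(float);
//       const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * vals_per_access;
//       float local_buffer[vals_per_access];
//       // The predicated overload zero-fills the registers when the access is out of bounds.
//       mem_access::load_global<16>(local_buffer, in + offset, offset < n_elems);
//       if (offset < n_elems) { mem_access::store_global<16>(out + offset, local_buffer); }
//   }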

// Util for tracking pipeline buffers
// TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE
template <int max>
class BufferTracker {
public:
    int current_state;

    __device__ __forceinline__ BufferTracker() : current_state(0) {}

    __device__ __forceinline__ int get()
    {
        int return_val = current_state++;
        current_state = (current_state == max ? 0 : current_state);
        return return_val;
    }
};
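
// Example (illustrative sketch): cycling through double-buffered pipeline stages.
// NUM_STAGES and the surrounding loop are placeholders for this sketch.
//
//   constexpr int NUM_STAGES = 2;
//   mem_access::BufferTracker<NUM_STAGES> tracker;
//   for (int i = 0; i < iterations; i++) {
//       int stage = tracker.get();  // yields 0, 1, 0, 1, ... on successive calls
//       /* fill or consume the buffer assigned to `stage` */
//   }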

__device__ __forceinline__ uint32_t lane_id()
{
#ifdef PTX_AVAILABLE
    unsigned int lane_id;
    asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id));
    return lane_id;
#else
    return threadIdx.x & (warpSize - 1);  // Portable
#endif
}

/////////// Load Global ///////////
template <>
__device__ __forceinline__ void load_global<16>(void* dst, const void* src)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n"
                 : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
                 : "l"(src));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %5, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\tmov.b32 %2, 0;\n"
        "\tmov.b32 %3, 0;\n"
        "\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
        : "l"(src), "r"((int)do_access));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
        data[0].z = 0;
        data[0].w = 0;
    }
#endif
}
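
// The predicated overloads in this file all follow the same pattern: a predicate register
// is set from do_access, the destination registers are zeroed, and the load is issued only
// when the predicate holds, so inactive (e.g. out-of-bounds) accesses yield zeros rather
// than faulting.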

template <>
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
                 : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
                 : "l"(src));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst,
                                                                         const void* src,
                                                                         bool do_access)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %5, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\tmov.b32 %2, 0;\n"
        "\tmov.b32 %3, 0;\n"
        "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
        : "l"(src), "r"((int)do_access));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
        data[0].z = 0;
        data[0].w = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
                                                                            const void* src)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
                 : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
                 : "l"(src));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
                                                                            const void* src,
                                                                            bool do_access)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %5, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\tmov.b32 %2, 0;\n"
        "\tmov.b32 %3, 0;\n"
        "\t@p ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
        : "l"(src), "r"((int)do_access));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
        data[0].z = 0;
        data[0].w = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<8>(void* dst, const void* src)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n"
                 : "=r"(data[0].x), "=r"(data[0].y)
                 : "l"(src));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %3, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\t@p ld.global.v2.u32 {%0, %1}, [%2];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y)
        : "l"(src), "r"((int)do_access));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
                 : "=r"(data[0].x), "=r"(data[0].y)
                 : "l"(src));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst,
                                                                        const void* src,
                                                                        bool do_access)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %3, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y)
        : "l"(src), "r"((int)do_access));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
                                                                           const void* src)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
                 : "=r"(data[0].x), "=r"(data[0].y)
                 : "l"(src));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
                                                                           const void* src,
                                                                           bool do_access)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %3, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y)
        : "l"(src), "r"((int)do_access));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<4>(void* dst, const void* src)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<4>(void* dst, const void* src, bool do_access)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\t@p ld.global.u32 {%0}, [%1];\n"
        "}\n"
        : "=r"(data[0])
        : "l"(src), "r"((int)do_access));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst,
                                                                        const void* src,
                                                                        bool do_access)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\t@p ld.global.cg.u32 {%0}, [%1];\n"
        "}\n"
        : "=r"(data[0])
        : "l"(src), "r"((int)do_access));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
                                                                           const void* src)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
                                                                           const void* src,
                                                                           bool do_access)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\t@p ld.global.cs.u32 {%0}, [%1];\n"
        "}\n"
        : "=r"(data[0])
        : "l"(src), "r"((int)do_access));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<2>(void* dst, const void* src)
{
    int16_t* data = reinterpret_cast<int16_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
#else
    const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access)
{
    int16_t* data = reinterpret_cast<int16_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.u16 %0, 0;\n"
        "\t@p ld.global.u16 {%0}, [%1];\n"
        "}\n"
        : "=h"(*data)
        : "l"(src), "r"((int)do_access));
#else
    const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src)
{
    int16_t* data = reinterpret_cast<int16_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
#else
    const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst,
                                                                        const void* src,
                                                                        bool do_access)
{
    int16_t* data = reinterpret_cast<int16_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.u16 %0, 0;\n"
        "\t@p ld.global.cg.u16 {%0}, [%1];\n"
        "}\n"
        : "=h"(*data)
        : "l"(src), "r"((int)do_access));
#else
    const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
                                                                           const void* src)
{
    int16_t* data = reinterpret_cast<int16_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
#else
    const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
                                                                           const void* src,
                                                                           bool do_access)
{
    int16_t* data = reinterpret_cast<int16_t*>(dst);
#ifdef PTX_AVAILABLE
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.u16 %0, 0;\n"
        "\t@p ld.global.cs.u16 {%0}, [%1];\n"
        "}\n"
        : "=h"(*data)
        : "l"(src), "r"((int)do_access));
#else
    const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

/////////// Load Shared ///////////
namespace internal {

#ifdef PTX_AVAILABLE
__device__ __forceinline__ unsigned convert_to_shared(const void* ptr)
{
#if __CUDACC_VER_MAJOR__ >= 11
    // In CUDA 11 we have a builtin intrinsic
    return __cvta_generic_to_shared(ptr);
#else
    unsigned ret_val;
    asm volatile(
        "{\n"
        "\t.reg .u64 p1;\n"
        "\tcvta.to.shared.u64 p1, %1;\n"
        "\tcvt.u32.u64 %0, p1;\n"
        "}\n"
        : "=r"(ret_val)
        : "l"(ptr));
    return ret_val;
#endif
}
#endif

}  // namespace internal
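
// The shared-memory PTX below takes 32-bit addresses within the shared window, so generic
// pointers are first narrowed with internal::convert_to_shared() (the
// __cvta_generic_to_shared intrinsic on CUDA 11+, inline cvta/cvt PTX otherwise).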

template <>
__device__ __forceinline__ void load_shared<16>(void* dst, const void* src)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);
    asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
                 : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
                 : "r"(src_shr));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access)
{
    uint4* data = reinterpret_cast<uint4*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %5, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\tmov.b32 %2, 0;\n"
        "\tmov.b32 %3, 0;\n"
        "\t@p ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
        : "r"(src_shr), "r"((int)do_access));
#else
    const uint4* src_cast = reinterpret_cast<const uint4*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
        data[0].z = 0;
        data[0].w = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_shared<8>(void* dst, const void* src)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);
    asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
                 : "=r"(data[0].x), "=r"(data[0].y)
                 : "r"(src_shr));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_shared<8>(void* dst, const void* src, bool do_access)
{
    uint2* data = reinterpret_cast<uint2*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %3, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\tmov.b32 %1, 0;\n"
        "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n"
        "}\n"
        : "=r"(data[0].x), "=r"(data[0].y)
        : "r"(src_shr), "r"((int)do_access));
#else
    const uint2* src_cast = reinterpret_cast<const uint2*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0].x = 0;
        data[0].y = 0;
    }
#endif
}

template <>
__device__ __forceinline__ void load_shared<4>(void* dst, const void* src)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);
    asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    data[0] = src_cast[0];
#endif
}

template <>
__device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access)
{
    int32_t* data = reinterpret_cast<int32_t*>(dst);
#ifdef PTX_AVAILABLE
    unsigned src_shr = internal::convert_to_shared(src);
    asm volatile(
        "{\n"
        "\t.reg .pred p;\n"
        "\tsetp.ne.b32 p, %2, 0;\n"
        "\tmov.b32 %0, 0;\n"
        "\t@p ld.shared.u32 %0, [%1];\n"
        "}\n"
        : "=r"(data[0])
        : "r"(src_shr), "r"((int)do_access));
#else
    const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
    if (do_access) {
        data[0] = src_cast[0];
    } else {
        data[0] = 0;
    }
#endif
}

/////////// Store Global ///////////
template <>
__device__ __forceinline__ void store_global<16>(void* dst, const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
                 : "memory");
#else
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst,
                                                                           const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
                 : "memory");
#else
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst,
                                                                              const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
                 : "memory");
#else
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<8>(void* dst, const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y));
#else
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst,
                                                                          const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y));
#else
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst,
                                                                             const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "l"(dst), "r"(data[0].x), "r"(data[0].y));
#else
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<4>(void* dst, const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
#else
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst,
                                                                          const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
#else
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst,
                                                                             const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
#else
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_global<2>(void* dst, const void* src)
{
    const int16_t* data = reinterpret_cast<const int16_t*>(src);
    int16_t* dst_cast = reinterpret_cast<int16_t*>(dst);
    dst_cast[0] = data[0];
}

template <>
__device__ __forceinline__ void store_global<2, StorePolicy::CacheGlobal>(void* dst,
                                                                          const void* src)
{
    const int16_t* data = reinterpret_cast<const int16_t*>(src);
    int16_t* dst_cast = reinterpret_cast<int16_t*>(dst);
    dst_cast[0] = data[0];
}

template <>
__device__ __forceinline__ void store_global<2, StorePolicy::CacheStreaming>(void* dst,
                                                                             const void* src)
{
    const int16_t* data = reinterpret_cast<const int16_t*>(src);
    int16_t* dst_cast = reinterpret_cast<int16_t*>(dst);
    dst_cast[0] = data[0];
}

/////////// Store Shared ///////////
template <>
__device__ __forceinline__ void store_shared<16>(void* dst, const void* src)
{
    const uint4* data = reinterpret_cast<const uint4*>(src);
#ifdef PTX_AVAILABLE
    unsigned dst_int = internal::convert_to_shared(dst);
    asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
                 :
                 : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w));
#else
    uint4* dst_cast = reinterpret_cast<uint4*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_shared<8>(void* dst, const void* src)
{
    const uint2* data = reinterpret_cast<const uint2*>(src);
#ifdef PTX_AVAILABLE
    unsigned dst_int = internal::convert_to_shared(dst);
    asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
                 :
                 : "r"(dst_int), "r"(data[0].x), "r"(data[0].y));
#else
    uint2* dst_cast = reinterpret_cast<uint2*>(dst);
    dst_cast[0] = data[0];
#endif
}

template <>
__device__ __forceinline__ void store_shared<4>(void* dst, const void* src)
{
    const int32_t* data = reinterpret_cast<const int32_t*>(src);
#ifdef PTX_AVAILABLE
    unsigned dst_int = internal::convert_to_shared(dst);
    asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data));
#else
    int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
    dst_cast[0] = data[0];
#endif
}
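
// Example (illustrative sketch): staging data through shared memory with the primitives
// above. tile, thread_buf, gbl_ptr, idx, and permuted_idx are placeholder names, and the
// indices are assumed to be multiples of 4 floats (16-byte aligned).
//
//   __shared__ float tile[1024];
//   float thread_buf[4];
//   mem_access::load_global<16>(thread_buf, gbl_ptr + idx);         // global -> registers
//   mem_access::store_shared<16>(tile + idx, thread_buf);           // registers -> shared
//   __syncthreads();
//   mem_access::load_shared<16>(thread_buf, tile + permuted_idx);   // shared -> registers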

/////////// Asynchronous Memory Copy ///////////
#ifdef ASYNC_COPY_AVAILABLE
template <int AccessSize>
__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);
    asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n"
                 :
                 : "r"(shr_int), "l"(gbl), "n"(AccessSize));
}

template <int AccessSize>
__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);
    asm volatile(
        "{\n"
        "   .reg .pred p;\n"
        "   setp.ne.b32 p, %0, 0;\n"
        "   @p cp.async.ca.shared.global [%1], [%2], %3;\n"
        "}\n"
        :
        : "r"((int)predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize));
}

template <int AccessSize>
__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);
    int bytes_to_copy = (predicate ? AccessSize : 0);
    asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n"
                 :
                 : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
}

template <int AccessSize>
__device__ __forceinline__ void memcpy_async_zero_nop(void* shr,
                                                      const void* gbl,
                                                      bool zero_predicate,
                                                      bool nop_predicate)
{
    static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
    unsigned shr_int = internal::convert_to_shared(shr);
    int bytes_to_copy = (zero_predicate ? AccessSize : 0);
    asm volatile(
        "{\n"
        "   .reg .pred p;\n"
        "   setp.ne.b32 p, %0, 0;\n"
        "   @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n"
        "}\n"
        :
        : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
}
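
// Semantics of the predicated copies above: the _nop variants skip the cp.async entirely
// when the predicate is false, leaving shared memory untouched, while the _zero variants
// always issue the copy but shrink the source size to 0, so the destination bytes in
// shared memory are zero-filled. _zero_nop combines both predicates.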

// Cache global variants. Separate interface to require deliberate use of them.
__device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl)
{
    unsigned shr_int = internal::convert_to_shared(shr);
    asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl));
}

__device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate)
{
    unsigned shr_int = internal::convert_to_shared(shr);
    asm volatile(
        "{\n"
        "   .reg .pred p;\n"
        "   setp.ne.b32 p, %0, 0;\n"
        "   @p cp.async.cg.shared.global [%1], [%2], 16;\n"
        "}\n"
        :
        : "r"((int)predicate), "r"(shr_int), "l"(gbl));
}

__device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate)
{
    unsigned shr_int = internal::convert_to_shared(shr);
    int bytes_to_copy = (predicate ? 16 : 0);
    asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n"
                 :
                 : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
}

__device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr,
                                                         const void* gbl,
                                                         bool zero_predicate,
                                                         bool nop_predicate)
{
    unsigned shr_int = internal::convert_to_shared(shr);
    int bytes_to_copy = (zero_predicate ? 16 : 0);
    asm volatile(
        "{\n"
        "   .reg .pred p;\n"
        "   setp.ne.b32 p, %0, 0;\n"
        "   @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n"
        "}\n"
        :
        : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
}

__device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); }

template <int stages>
__device__ __forceinline__ void memcpy_async_wait()
{
    static_assert(stages <= 8);
    asm volatile("cp.async.wait_group %0;\n" : : "n"(stages));
}
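
// Example (illustrative sketch): a simple two-stage prefetch loop built from these
// primitives. stage_buffer, gbl_src, TILE_BYTES, and num_tiles are placeholders, and
// per-thread offsets are omitted for brevity.
//
//   for (int i = 0; i < num_tiles; i++) {
//       mem_access::memcpy_async<16>(stage_buffer[i % 2], gbl_src + i * TILE_BYTES);
//       mem_access::memcpy_async_fence();        // commit this stage's copies
//       if (i > 0) {
//           mem_access::memcpy_async_wait<1>();  // allow at most one stage in flight
//           __syncthreads();
//           /* consume stage_buffer[(i - 1) % 2] */
//       }
//   }
//   mem_access::memcpy_async_wait<0>();          // drain the final stage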

// TODO: The tail completion should be a known compile-time artifact; we should try to induce
// this without all of the branches at the call site. This is a hacky solution.
template <>
__device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages)
{
    if (remaining_stages == 0) memcpy_async_wait<0>();
}

template <>
__device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages)
{
    if (remaining_stages == 1)
        memcpy_async_wait<1>();
    else if (remaining_stages == 0)
        memcpy_async_wait<0>();
}

template <>
__device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages)
{
    if (remaining_stages == 2)
        memcpy_async_wait<2>();
    else if (remaining_stages == 1)
        memcpy_async_wait<1>();
    else if (remaining_stages == 0)
        memcpy_async_wait<0>();
}

template <>
__device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages)
{
    if (remaining_stages == 3)
        memcpy_async_wait<3>();
    else if (remaining_stages == 2)
        memcpy_async_wait<2>();
    else if (remaining_stages == 1)
        memcpy_async_wait<1>();
    else if (remaining_stages == 0)
        memcpy_async_wait<0>();
}

template <>
__device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages)
{
    if (remaining_stages == 4)
        memcpy_async_wait<4>();
    else if (remaining_stages == 3)
        memcpy_async_wait<3>();
    else if (remaining_stages == 2)
        memcpy_async_wait<2>();
    else if (remaining_stages == 1)
        memcpy_async_wait<1>();
    else if (remaining_stages == 0)
        memcpy_async_wait<0>();
}

template <>
__device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages)
{
    if (remaining_stages == 5)
        memcpy_async_wait<5>();
    else if (remaining_stages == 4)
        memcpy_async_wait<4>();
    else if (remaining_stages == 3)
        memcpy_async_wait<3>();
    else if (remaining_stages == 2)
        memcpy_async_wait<2>();
    else if (remaining_stages == 1)
        memcpy_async_wait<1>();
    else if (remaining_stages == 0)
        memcpy_async_wait<0>();
}
#endif

}  // namespace mem_access