@@ -145,6 +145,7 @@ struct ByteAddressBuffer
     uint4 Load4(int location, out uint status);
 
     [__readNone]
+    [ForceInline]
     T Load<T>(int location)
     {
         return __byteAddressBufferLoad<T>(this, location);
@@ -325,8 +326,8 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     __intrinsic_op($(kIROp_CombinedTextureSamplerGetSampler))
     SamplerComparisonState __getComparisonSampler();
 
-    [ForceInline]
     [__readNone]
+    [ForceInline]
     [require(glsl_hlsl_spirv, texture_querylod)]
     float CalculateLevelOfDetail(TextureCoord location)
     {
@@ -346,8 +347,8 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
         }
     }
 
-    [ForceInline]
     [__readNone]
+    [ForceInline]
     [require(glsl_hlsl_spirv, texture_querylod)]
     float CalculateLevelOfDetailUnclamped(TextureCoord location)
     {
@@ -368,6 +369,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_cuda_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T Sample(vector<float, Shape.dimensions + isArray> location)
     {
@@ -417,6 +419,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     __glsl_extension(GL_ARB_sparse_texture_clamp)
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T Sample(vector<float, Shape.dimensions + isArray> location, vector<int, Shape.planeDimensions> offset, float clamp)
@@ -439,6 +442,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     __target_intrinsic(hlsl)
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T Sample(vector<float, Shape.dimensions + isArray> location, vector<int, Shape.planeDimensions> offset, float clamp, out uint status)
@@ -448,6 +452,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T SampleBias(vector<float, Shape.dimensions + isArray> location, float bias)
     {
@@ -469,6 +474,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T SampleBias(vector<float, Shape.dimensions + isArray> location, float bias, constexpr vector<int, Shape.planeDimensions> offset)
     {
@@ -599,6 +605,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]
     T SampleGrad(vector<float, Shape.dimensions + isArray> location, vector<float, Shape.dimensions> gradX, vector<float, Shape.dimensions> gradY)
     {
@@ -620,6 +627,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]
     T SampleGrad(vector<float, Shape.dimensions + isArray> location, vector<float, Shape.dimensions> gradX, vector<float, Shape.dimensions> gradY, constexpr vector<int, Shape.dimensions> offset)
     {
@@ -639,8 +647,9 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,1,format>
         }
     }
 
-    __glsl_extension(GL_ARB_sparse_texture_clamp)
     [__readNone]
+    [ForceInline]
+    __glsl_extension(GL_ARB_sparse_texture_clamp)
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]
     T SampleGrad(vector<float, Shape.dimensions + isArray> location, vector<float, Shape.dimensions> gradX, vector<float, Shape.dimensions> gradY, constexpr vector<int, Shape.dimensions> offset, float lodClamp)
     {
@@ -785,6 +794,7 @@ __generic<T:IFloat, Shape: __ITextureShape, let isArray:int, let isMS:int, let s
 extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
 {
     [__readNone]
+    [ForceInline]
     [require(cpp_cuda_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T Sample(SamplerState s, vector<float, Shape.dimensions + isArray> location)
     {
@@ -837,6 +847,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T Sample(SamplerState s, vector<float, Shape.dimensions + isArray> location, constexpr vector<int, Shape.planeDimensions> offset)
     {
@@ -858,6 +869,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
     }
 
     [__readNone]
+    [ForceInline]
     __glsl_extension(GL_ARB_sparse_texture_clamp)
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T Sample(SamplerState s, vector<float, Shape.dimensions + isArray> location, constexpr vector<int, Shape.planeDimensions> offset, float clamp)
@@ -880,15 +892,17 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
         }
     }
 
-    [__readNone]
     __target_intrinsic(hlsl)
+    [__readNone]
+    [ForceInline]
     T Sample(SamplerState s, vector<float, Shape.dimensions + isArray> location, constexpr vector<int, Shape.planeDimensions> offset, float clamp, out uint status)
     {
         status = 0;
         return Sample(s, location, offset, clamp);
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T SampleBias(SamplerState s, vector<float, Shape.dimensions + isArray> location, float bias)
     {
@@ -910,6 +924,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_fragment)]
     T SampleBias(SamplerState s, vector<float, Shape.dimensions + isArray> location, float bias, constexpr vector<int, Shape.planeDimensions> offset)
     {
@@ -930,7 +945,8 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
         }
     }
 
-    [__readNone] [ForceInline]
+    [__readNone]
+    [ForceInline]
     [require(glsl_hlsl_spirv, texture_shadowlod)]
     float SampleCmp(SamplerComparisonState s, vector<float, Shape.dimensions + isArray> location, float compareValue)
     {
@@ -960,7 +976,8 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
         }
     }
 
-    [__readNone] [ForceInline]
+    [__readNone]
+    [ForceInline]
     [require(glsl_hlsl_spirv, texture_shadowlod)]
     float SampleCmpLevelZero(SamplerComparisonState s, vector<float, Shape.dimensions + isArray> location, float compareValue)
     {
@@ -987,7 +1004,8 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
         }
     }
 
-    [__readNone] [ForceInline]
+    [__readNone]
+    [ForceInline]
     [require(glsl_hlsl_spirv, texture_shadowlod)]
     float SampleCmp(SamplerComparisonState s, vector<float, Shape.dimensions + isArray> location, float compareValue, constexpr vector<int, Shape.planeDimensions> offset)
     {
@@ -1013,7 +1031,8 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
         }
     }
 
-    [__readNone] [ForceInline]
+    [__readNone]
+    [ForceInline]
     [require(glsl_hlsl_spirv, texture_shadowlod)]
     float SampleCmpLevelZero(SamplerComparisonState s, vector<float, Shape.dimensions + isArray> location, float compareValue, constexpr vector<int, Shape.planeDimensions> offset)
     {
@@ -1041,6 +1060,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]
     T SampleGrad(SamplerState s, vector<float, Shape.dimensions + isArray> location, vector<float, Shape.dimensions> gradX, vector<float, Shape.dimensions> gradY)
     {
@@ -1062,6 +1082,7 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
     }
 
     [__readNone]
+    [ForceInline]
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]
     T SampleGrad(SamplerState s, vector<float, Shape.dimensions + isArray> location, vector<float, Shape.dimensions> gradX, vector<float, Shape.dimensions> gradY, constexpr vector<int, Shape.dimensions> offset)
     {
@@ -1083,8 +1104,9 @@ extension __TextureImpl<T,Shape,isArray,isMS,sampleCount,0,isShadow,0,format>
         }
     }
 
-    __glsl_extension(GL_ARB_sparse_texture_clamp)
     [__readNone]
+    [ForceInline]
+    __glsl_extension(GL_ARB_sparse_texture_clamp)
     [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]
     T SampleGrad(SamplerState s, vector<float, Shape.dimensions + isArray> location, vector<float, Shape.dimensions> gradX, vector<float, Shape.dimensions> gradY, constexpr vector<int, Shape.dimensions> offset, float lodClamp)
     {
@@ -2813,7 +2835,6 @@ ${{{{
     [__requiresNVAPI]
     [ForceInline]
     __cuda_sm_version(2.0)
-    [ForceInline]
     [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda_float1)]
     void InterlockedAddF32(uint byteAddress, float valueToAdd)
     {
@@ -2834,7 +2855,6 @@ ${{{{
     // Int64 Add
     [ForceInline]
     __cuda_sm_version(6.0)
-    [ForceInline]
     [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda6_int64)]
     void InterlockedAddI64(uint byteAddress, int64_t valueToAdd, out int64_t originalValue)
     {
@@ -2858,15 +2878,13 @@ ${{{{
     [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda6_int64)]
     void InterlockedAddI64(uint byteAddress, int64_t valueToAdd);
 
-    [ForceInline]
     __specialized_for_target(hlsl)
     [ForceInline]
     void InterlockedAddI64(uint byteAddress, int64_t valueToAdd)
     {
         __atomicAdd(this, byteAddress, __asuint2(valueToAdd));
     }
 
-    [ForceInline]
     __specialized_for_target(glsl)
     __specialized_for_target(spirv)
     [ForceInline]
@@ -2906,6 +2924,7 @@ ${{{{
     uint64_t InterlockedMaxU64(uint byteAddress, uint64_t value);
 
     __specialized_for_target(hlsl)
+    [ForceInline]
     uint64_t InterlockedMaxU64(uint byteAddress, uint64_t value) { return __asuint64(__atomicMax(this, byteAddress, __asuint2(value))); }
 
     __specialized_for_target(glsl)
@@ -2965,6 +2984,7 @@ ${{{{
     uint64_t InterlockedMinU64(uint byteAddress, uint64_t value);
 
     __specialized_for_target(hlsl)
+    [ForceInline]
     uint64_t InterlockedMinU64(uint byteAddress, uint64_t value) { return __asuint64(__atomicMin(this, byteAddress, __asuint2(value))); }
 
     __specialized_for_target(glsl)
@@ -3024,6 +3044,7 @@ ${{{{
     uint64_t InterlockedAndU64(uint byteAddress, uint64_t value);
 
     __specialized_for_target(hlsl)
+    [ForceInline]
     uint64_t InterlockedAndU64(uint byteAddress, uint64_t value) { return __asuint64(__atomicAnd(this, byteAddress, __asuint2(value))); }
 
     __specialized_for_target(glsl)
@@ -3063,6 +3084,7 @@ ${{{{
     uint64_t InterlockedOrU64(uint byteAddress, uint64_t value);
 
     __specialized_for_target(hlsl)
+    [ForceInline]
     uint64_t InterlockedOrU64(uint byteAddress, uint64_t value) { return __asuint64(__atomicOr(this, byteAddress, __asuint2(value))); }
 
     __specialized_for_target(glsl)
@@ -3102,6 +3124,7 @@ ${{{{
     uint64_t InterlockedXorU64(uint byteAddress, uint64_t value);
 
     __specialized_for_target(hlsl)
+    [ForceInline]
     uint64_t InterlockedXorU64(uint byteAddress, uint64_t value) { return __asuint64(__atomicXor(this, byteAddress, __asuint2(value))); }
 
     __specialized_for_target(glsl)
@@ -3140,6 +3163,7 @@ ${{{{
     uint64_t InterlockedExchangeU64(uint byteAddress, uint64_t value);
 
     __specialized_for_target(hlsl)
+    [ForceInline]
     uint64_t InterlockedExchangeU64(uint byteAddress, uint64_t value) { return __asuint64(__atomicExchange(this, byteAddress, __asuint2(value))); }
 
     __specialized_for_target(glsl)
@@ -3255,6 +3279,7 @@ ${{{{
             return;
         }
     }
+
     [ForceInline]
     void InterlockedCompareExchange64(uint byteAddress, uint64_t compareValue, uint64_t value, out uint64_t outOriginalValue)
     {
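
For reference, a minimal Slang sketch of the attribute layout the sampling hunks above converge on (signature borrowed from the CalculateLevelOfDetail hunk; illustrative only, not copied verbatim from the patched file — the atomic helpers instead place [ForceInline] next to their __specialized_for_target(...) lines):

    // Illustrative sketch: [ForceInline] now sits after [__readNone]
    // and before the [require(...)] / __glsl_extension(...) lines.
    [__readNone]
    [ForceInline]
    [require(glsl_hlsl_spirv, texture_querylod)]
    float CalculateLevelOfDetail(TextureCoord location)
    {
        // method body unchanged by this patch
    }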