Open
Description
For the following simple shader
#version 460
#extension GL_EXT_shader_8bit_storage : require
#extension GL_EXT_shader_16bit_storage : require
#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
layout(binding = 1 ) uniform _16bit_storage
{
i16vec4 i16v4;
};
// This is read back and checked on the CPU side to verify the conversions
layout(binding = 2 ) writeonly buffer ConversionOutBuffer
{
i8vec4 i16v4_to_i8v4;
} cob;
out vec4 fcolor;
void main()
{
// Conversions
{
cob.i16v4_to_i8v4 = i8vec4(i16v4);
}
bool RED = true;
bool GREEN = false;
fcolor = vec4( (RED) ? 1.0f : 0.0f,
(GREEN) ? 1.0f : 0.0f,
0.0f, 1.0f);
}
We see incorrect codegen when loading a member from the UBO and performing a conversion:
%19 = OpAccessChain %_ptr_Uniform_v4short %_ %int_0
%20 = OpLoad %v4short %19
%22 = OpCompositeExtract %int %20 0
%23 = OpCompositeExtract %int %20 1
%24 = OpCompositeExtract %int %20 2
%25 = OpCompositeExtract %int %20 3
%26 = OpCompositeConstruct %v4int %22 %23 %24 %25
%27 = OpSConvert %v4char %26
The OpCompositeExtract instructions extract components with result type 'int', but %20 is a vector of 16-bit integers (%v4short) — the component result type should be 'short', not 'int'.
Activity