Fragment shader not working as expected

I am porting the following shader into Godot. The results do not seem correct, though. Apart from the wrong colours, there seems to be no change over time. What is the problem with it?

shader_type canvas_item;  const float DELTA = 0.00001; const float TAU = 6.28318530718; const float NOISE_TEXTURE_SIZE = 256.0; const float NOISE_TEXTURE_PIXEL_COUNT = (NOISE_TEXTURE_SIZE * NOISE_TEXTURE_SIZE);  uniform vec4 vColor: color;  // MAIN CONTROLLER UNIFORMS uniform float intensity = 1.;       // overall effect intensity, 0-1 (no upper limit) uniform float rngSeed = .0;         // seed offset (changes configuration around) uniform sampler2D noiseTexture;     // noise texture sampler  //TUNING uniform float lineSpeed = .01;          // line speed uniform float lineDrift = .1;           // horizontal line drifting uniform float lineResolution = 1.;      // line resolution uniform float lineVertShift = .0;       // wave phase offset of horizontal lines uniform float lineShift = .004;         // horizontal shift uniform float jumbleness = .2;          // amount of "block" glitchiness uniform float jumbleResolution = .2;    // resolution of blocks uniform float jumbleShift = .15;        // texture shift by blocks   uniform float jumbleSpeed = 1.;         // speed of block variation uniform float dispersion = .0025;       // color channel horizontal dispersion uniform float channelShift = .004;      // horizontal RGB shift uniform float noiseLevel = .5;          // level of noise uniform float shakiness = .5;           // horizontal shakiness //  float rand(vec2 co){     return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); }  vec4 extractRed(vec4 col){     return vec4(col.r, 0., 0., col.a); }  vec4 extractGreen(vec4 col){     return vec4(0., col.g, 0., col.a); }  vec4 extractBlue(vec4 col){     return vec4(0., 0., col.b, col.a); }  // Replacement for the mirror address mode, hopefully nobody needs filtering. vec2 mirror(vec2 v) {     return abs((fract((v * 0.5) + 0.5) * 2.0) - 1.0); }  vec2 downsample(vec2 v, vec2 res) {         // Division by zero protected by uniform getters.     
return floor(v * res) / res; }  // Fetches four random values from an RGBA noise texture vec4 whiteNoise(vec2 coord, vec2 texelOffset, vec2 resolution) {     vec2 offset = downsample(vec2(rngSeed * NOISE_TEXTURE_SIZE, rngSeed) + texelOffset, vec2(NOISE_TEXTURE_SIZE));     vec2 ratio = resolution / vec2(NOISE_TEXTURE_SIZE);     return texture(noiseTexture, (coord * ratio) + offset);  }  // Fetch noise texture texel based on single offset in the [0-1] range vec4 random(float dataOffset) {     vec2 halfTexelSize = vec2((0.5 / NOISE_TEXTURE_SIZE));     float offset = rngSeed + dataOffset;         return texture(noiseTexture, vec2(offset * NOISE_TEXTURE_SIZE, offset) + halfTexelSize);  }  // Jumble coord generation vec2 jumble(vec2 coord, float time, vec2 resolution){     // Static branch.     if ((jumbleShift * jumbleness * jumbleResolution) < DELTA) {         return vec2(0.0);     }              vec2 gridCoords = (coord * jumbleResolution) / (NOISE_TEXTURE_SIZE * 0.0245);     float jumbleTime = mod(floor(time * 0.02 * jumbleSpeed), NOISE_TEXTURE_PIXEL_COUNT);     vec2 offset = random(jumbleTime / NOISE_TEXTURE_PIXEL_COUNT).ga * jumbleResolution;     vec4 cellRandomValues = whiteNoise(gridCoords, vec2(jumbleResolution * -10.0) + offset, resolution);     return (cellRandomValues.ra - 0.5) * jumbleShift * floor(min(0.99999, cellRandomValues.b) + jumbleness); }  // Horizontal line offset generation float lineOffset(vec2 coord, vec2 uv, float time, vec2 resolution) {     // Static branch.     
if (lineShift < DELTA) {         return 0.0;     }          // Wave offsets     vec2 waveHeights = vec2(50.0 * lineResolution, 25.0 * lineResolution);         vec4 lineRandom = whiteNoise(downsample(uv.yy, waveHeights), vec2(0.0), resolution);     float driftTime = uv.y * resolution.y * 2.778;          // XY: big waves, ZW: drift waves     vec4 waveTimes = (vec4(downsample(lineRandom.ra * TAU, waveHeights) * 80.0, driftTime + 2.0, (driftTime * 0.1) + 1.0) + (time * lineSpeed)) + (lineVertShift * TAU);     vec4 waveLineOffsets = vec4(sin(waveTimes.x), cos(waveTimes.y), sin(waveTimes.z), cos(waveTimes.w));     waveLineOffsets.xy *= ((whiteNoise(waveTimes.xy, vec2(0.0), resolution).gb - 0.5) * shakiness) + 1.0; *= lineDrift;     return dot(waveLineOffsets, vec4(1.0)); }  void fragment() {     vec3 randomValues = vec3(rand(vec2(TIME, 0.0)), rand(vec2(TIME, 0.0)), rand(vec2(TIME, 0.0)));     vec2 resolution = 1.0 / SCREEN_PIXEL_SIZE;     vec2 uv = FRAGCOORD.xy / (1.0 / SCREEN_PIXEL_SIZE).xy;          // Sample random high-frequency noise     vec4 randomHiFreq = whiteNoise(uv, randomValues.xy, resolution);          // Apply line offsets     vec2 offsetCoords = uv;     offsetCoords.x += ((((2.0 * randomValues.z) - 1.0) * shakiness * lineSpeed) + lineOffset(offsetCoords, uv, TIME, resolution)) * lineShift * intensity;          // Apply jumbles     offsetCoords += jumble(offsetCoords, TIME, resolution) * intensity * intensity * 0.25;              // Channel split     vec2 shiftFactors = (channelShift + (randomHiFreq.rg * dispersion)) * intensity;     vec4 outColour;          // Static branch.     
if (((channelShift + dispersion) * intensity) < DELTA) {         outColour = texture(SCREEN_TEXTURE, mirror(offsetCoords));     } else {         outColour = extractRed(texture(SCREEN_TEXTURE, mirror(offsetCoords + vec2(shiftFactors.r, 0.0)))) + extractBlue(texture(SCREEN_TEXTURE, mirror(offsetCoords + vec2(-shiftFactors.g, 0.0)))) + extractGreen(texture(SCREEN_TEXTURE, mirror(offsetCoords)));     }          // Add noise         outColour.rgb *= (vec3(.55, .5, .4) * randomHiFreq.gab * intensity * noiseLevel) + 1.0;              // COLOR = vColor * outColour;     COLOR = outColour; } 

[Image: screenshot of the shader's incorrect output]

Hiding UIs behind a custom image shader

Is there a way to hide UI elements such as text, images, etc. behind another transparent image? The problem is that I am using the latest Unity version, and these custom shaders have no effect on hiding the UI elements. The two custom shaders I have applied for testing are:

Shader "Custom/DepthReserve" {     Properties     {     }     SubShader     {         Tags { "RenderType" = "Opaque" "Queue"="Geometry-1" }         LOD 100           Blend Zero One           Pass         {           }     } } 
Shader "Custom/InvisibleMask" {   SubShader {     // draw after all opaque objects (queue = 2001):     Tags { "Queue"="Geometry+1" }     Pass {       Blend Zero One // keep the image behind it     }   } } 

So is there a way to use a transparent image to hide text, images and even gameobjects?

OpenGL GLSL ES 3.10 – Referencing a uniform variable, causes the vertex shader to not draw anything

I have this project, that has a default shader, that just draws models and textures. Recently I decided to add a second shader that does a fancy effect, and is used only on some of the objects drawn.

After compiling the project for Linux or Windows, it all works as expected. When compiling the project to Android, only on specific devices, the new shader doesn’t work, while on other devices I tried, it all works.

My shaders

Below is my default vertex shader specifically made for Android devices, this one works on all devices and draws everything without any editing or effect. As far as I understand, the fragment shaders work, so I’ll omit them.

    #version 310 es      in vec4 position;     in vec3 colour;     in vec2 texCoord;      uniform mat4 matrix;     uniform mat4 matrixProjection;      out vec2 outTexCoord;     out vec4 outColour;      void main() {             gl_Position = matrixProjection *matrix *position;             outTexCoord  = texCoord;             outColour  = vec4(colour.rgb, 1);     } 

I hope this looks fairly straight-forward. matrixProjection is the projection matrix, and matrix is the model-view matrix. They both work as expected and I’m able to render a whole scene without issue.

Now here is a simplified version of my new shader:

    #version 310 es      in vec4 position;     in vec3 colour;     in vec2 texCoord;      uniform mat4 matrix;     uniform mat4 matrixProjection;     uniform float animationCurrent;      out vec2 outTexCoord;     out vec4 outColour;      void main() {             gl_Position = matrixProjection *matrix *position;              if (animationCurrent > 0.0) {                     gl_Position.y += 5.0;             }              outColour = vec4(colour.rgb, 1.0);             outTexCoord  = texCoord;     } 

The only difference of the new shader is the new uniform animationCurrent, and an extra if statement that will modify the gl_Position.y of some vertices. Any object that is using this shader, is not drawn at all on the screen on some devices.

What I’ve tried

From the new shader, if I remove the entire if statement, it all works and it displays objects as-is. If I replace the if statement with if (true) it still works, but it just displays all vertices of objects drawn with it slightly higher. If I replace it with if (false) it still works as expected.

So for some reason, just referencing animationCurrent causes the object to not be drawn.

I also tried replacing the if statement with if (matrix[0][0] > 0.0) and it still draws the object, it looks like there’s something specifically wrong with animationCurrent. I tried adding another matrix uniform variable, and set its value the same way as I do matrix, but it wouldn’t draw the object either.

This should mean that the value of animationCurrent is not relevant, and the fact that it’s a uniform float doesn’t matter either.


The problem occurs on an android phone with this hardware:

Device: Moto E (4) Plus - 7.1.1 Vendor graphic card: ARM Renderer: Mali-T720 Version OpenGL: OpenGL ES 3.1 v1.r12p1-01alp0.62f282720426ab7712f1c6b996a6dc82 Version GLSL: OpenGL ES GLSL ES 3.10 

And this android tablet with similar hardware:

Device: Kindle Fire 8 Vendor graphic card: ARM Renderer: Mali-T720 Version GL: OpenGL ES 3.1 v1.r26p0-01rel0.526d936ea9da20486773a9aaceecd920 Version GLSL: OpenGL ES GLSL ES 3.10 

This is an android tablet where everything works as expected:

Device: Lenovo TB-X505F - 10 Vendor graphic card: Qualcomm Renderer: Adreno (TM) 504 Version GL: OpenGL ES 3.2 V@415.0 (GIT@f345350, I0760943699, 1580221225) (Date:01/28/20) Version GLSL: OpenGL ES GLSL ES 3.20 

And here is a slightly older device that works. I’ve modified the shader a bit to support an older glsl version, but the idea is the same:

Device: Kindle Fire 7 Vendor graphic card: ARM Renderer: Mali-450 MP Version GL: OpenGL ES 2.0 Version GLSL: OpenGL ES GLSL ES 1.00 


My primary goal, is to understand what is causing this. Have I missed something very obvious? Is this a very edge-case bug related to the hardware?

I’m still learning how to support different devices with different versions of glsl, so it’s very likely I’ve missed something.

Any information you have, let me know. I’m willing to try a few things on different devices to find more about this issue.

Cylindrical billboarding around an arbitrary axis in geometry shader

I found an answer on this site relating to this question already, but it doesn’t seem applicable in the context of my project.

Basically I’d like to create a method which fits this signature:

float3x3 AxisBillboard(float3 source, float3 target, float3 axis) 

That is to say, when given a source point (i.e. an object’s position in world space), a target (the camera position in world space), and an axis (the object’s up vector, which is not necessarily the global y axis), it produces a 3×3 rotation matrix by which I can multiply my vertices so that they are properly rotated.

I’ve found many solutions and tutorials online which work nicely assuming I only want to rotate around the y axis.

For example, here’s a solution which billboards around the global y axis:

float3 dir = normalize(target - source);      float angleY = atan2(dir.x, dir.z); c = cos(angleY); s = sin(angleY);      float3x3 rotYMatrix; rotYMatrix[0].xyz = float3(c, 0, s); rotYMatrix[1].xyz = float3(0, 1, 0); rotYMatrix[2].xyz = float3(-s, 0, c); 

For context, I’m working on a grass shader, and each individual blade of grass should be billboarded to face the camera while remaining aligned with the normal of the terrain.

For dynamically batched objects the `unity_ObjectToWorld` shader variable is always set to default?

I’m using the standard render pipeline and the unity_ObjectToWorld variable for some calculations in my shader. After I enabled dynamic batching these calculations got broken. It seems that unity_ObjectToWorld is set to default when the objects are being batched. Is it by design? I didn’t find anything in the documentation.

Floodfilling a texture in HLSL Compute shader

I have a very large texture which I want to fill with values representing "distance in units from a river tile". The texture is already seeded with these origin river points (meaning distance/value = 0 in them).

From these points I want to floodfill outwards, incrementing the value with each step from these origin points.

Doing this on the CPU is no problem using a stack or similar structure, but ideally I want to do this in the middle of a Compute shader execution which runs over the entire texture.

I have read this, which sounds similar to what I want to do, but it mentions there might be a smarter way to do this with Compute shaders – which is what I am using.

Any ideas on how to solve this on compute shaders?

Difference between shader input element classification between D3D12 and Vulkan

I’m confused about the difference between the shader input element classifications in D3D12 and Vulkan. For example, in Vulkan I could have the following declarations:

struct vertex {     glm::vec3 pos;     glm::vec3 col; };  VkVertexInputBindingDescription input_binding_description {     .binding = 0,     .stride = sizeof(vertex),     .inputRate = VK_VERTEX_INPUT_RATE_VERTEX };  std::array<VkVertexInputAttributeDescription, 2> input_attribute_descriptions {     VkVertexInputAttributeDescription{         .location = 0,         .format = VK_FORMAT_R32G32B32_SFLOAT,         .offset = offsetof(vertex, pos)     },         VkVertexInputAttributeDescription{         .location = 1,         .format = VK_FORMAT_R32G32B32_SFLOAT,         .offset = offsetof(vertex, col)     } }; 

Here the input rate is specified per vertex and not per attribute. On the other hand, in D3D12, we would have

struct Vertex {     XMFLOAT3 pos;     XMFLOAT3 col; };  std::array<D3D12_INPUT_ELEMENT_DESC, 2> input_element_descs = {     { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0,     D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },     { "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12,     D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 } }; 

And as you can see, the input rate is specified per attribute. Why is that? Is there a difference between the meaning of the input classification in D3D12 and Vulkan that I’m missing? I’m not familiar with D3D12, but at first glance, it doesn’t make sense to me to have different input classifications for the attributes of a vertex.

How to use an overlapping shader so that it uses sorting layers from the sprite renderer and is transparent

I’m using the shader from this question

, but once I put it in the game, and my character gets close to it, it will be hidden by the shadow of the tree:

[Image: screenshot showing the character hidden behind the tree's shadow]

Is there any way to make the shader transparent and/or maybe use the sprite renderer’s sorting layer system?

I have been trying to use a stencil-configurable shader, with no luck. Thank you for your help.

Not sure what combination of these settings should work: [Image: screenshot of the shader's stencil configuration options]

The character is using a normal sprite renderer.

Sending light data from Vertex Shader to Pixel Shader?

We have a pixel shader constant buffer that contains the light data for the item that is currently rendered.

To implement tangent space normal mapping, i would need to transform each light into tangent space.

Is it a valid approach to instead bind the light data to the vertex shader and then send the transformed Light Data to the pixel shader since the vertex shader runs less frequently than the pixel shader?

As a side note: I am using forward rendering with a limited light count.
Our pixel shader constant buffer looks like this:

struct RendererLight {     float3 Position;     float3 Color;     float3 Direction;     float Intensity;     float In;     float Out;     float Range; };  cbuffer LightsBuffer : register(b2) {     RendererLight Lights[8];     uint NumLights; };