How to get a correct SSAO result?

I’m new to shader and I’m going to implement SSAO in my project. I use a logic like this:

float4 MainPS(VertexShaderOutput input) : COLOR
{
    float3 fragPos = VSPositionFromDepth(input.TexCoords );
   
    
        
    
    float occlusion = 0.0;
    float radius =1;
      
    float3 sampleZ = float3(0, 0, 0);
    sampleZ = fragPos + sampleZ;
        
      
    float4 offsetZ = float4(sampleZ, 1.0);
    offsetZ = mul(projection, offsetZ);  
    offsetZ.xy /= offsetZ.w;  
    offsetZ.xy = offsetZ.xy * 0.5 + 0.5; 
      
    float sampleDepthZ = tex2D(gProjectionDepth, offsetZ.xy).r * 2 - 1;
    for (int i = 0; i < 16; ++i)
    {
   
        float3 sample = samples[i] ;
        sample =  fragPos+ sample;
        
       
        float4 offset = float4(sample, 1.0);
        offset = mul(projection , offset); 
        offset.xy  /= offset.w; 
        offset.xy  = offset.xy  * 0.5 + 0.5; 
     
       
        float sampleDepth = tex2D(gProjectionDepth, offset.xy).r* 2 - 1;
 
        occlusion += (sampleDepth > sampleDepthZ ? 0.0 :1.0);
         
    }
    
    occlusion = 1.0- (occlusion / 16);
    return float4(occlusion, occlusion, occlusion, 1);

}

float3 VSPositionFromDepth(float2 vTexCoord)
{
    // Get the depth value for this pixel
    float z = tex2D(gProjectionDepth, vTexCoord).r*2-1;
    // Get x/w and y/w from the viewport position
    float x = vTexCoord.x*2-1  ;
    float y = (1 - vTexCoord.y) * 2 - 1;
    float4 vProjectedPos = float4(x, y, z, 1 );
    // Transform by the inverse projection matrix
    float4 vPositionVS = mul(vProjectedPos, g_matInvProjection);
   
    // Divide by w to get the view-space position
    return vPositionVS.xyz / vPositionVS.w;
}

The pixel shader uses a clip space depth map and an array of sample points. First get the view-space position of the pixel, then transform it to clip space and sample depth as the center depth of the sample kernel, then use the same method to get the depth value of every random sample point, compare these depth values with the center depth value and get a result. The method sounds right but I got wrong result, the ao texture is like that:

The result texture is small and even glitchy if I rotate the camera. I cannot figure out why the texture is small all the time and I have been stuck here for three days. Am I going to completely rewrite the shader?

the geometry buffer shader is like that:

matrix World;
matrix View;
matrix Projection;

struct VertexShaderInput
{
    float4 Position : POSITION0;
    float3 Normal : NORMAL0;
    float2 TexureCoordinate : TEXCOORD0;
    float3 Tangent : TANGENT0;
};

struct VertexShaderOutput
{
    float4 Position  : SV_Position;
    float4 PositionV : TEXCOORD1;
    
    float4 PositionP : TEXCOORD4;
    float3 Normal : TEXCOORD2;
    float3 Tangent : TEXCOORD3;
};
struct PixelShaderOutput
{
    
    float4 ViewPosition : COLOR0;
    float4 ProjectionDepth : COLOR1;
    float4 Normal : COLOR2;
};

float3x3 inverse_mat3(float3x3 m)
{
    float Determinant =
       m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2])
     - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2])
     + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]);
    
    float3x3 Inverse;
    Inverse[0][0] = +(m[1][1] * m[2][2] - m[2][1] * m[1][2]);
    Inverse[1][0] = -(m[1][0] * m[2][2] - m[2][0] * m[1][2]);
    Inverse[2][0] = +(m[1][0] * m[2][1] - m[2][0] * m[1][1]);
    Inverse[0][1] = -(m[0][1] * m[2][2] - m[2][1] * m[0][2]);
    Inverse[1][1] = +(m[0][0] * m[2][2] - m[2][0] * m[0][2]);
    Inverse[2][1] = -(m[0][0] * m[2][1] - m[2][0] * m[0][1]);
    Inverse[0][2] = +(m[0][1] * m[1][2] - m[1][1] * m[0][2]);
    Inverse[1][2] = -(m[0][0] * m[1][2] - m[1][0] * m[0][2]);
    Inverse[2][2] = +(m[0][0] * m[1][1] - m[1][0] * m[0][1]);
    Inverse /= Determinant;
    
    return Inverse;
} 
 
 
VertexShaderOutput MainVS(in VertexShaderInput input)
{
	VertexShaderOutput output = (VertexShaderOutput)0;
	
    float4 worldPosition = mul(input.Position, World);
    float4 viewPosition = mul(worldPosition, View);
    output.Position  = mul(viewPosition, Projection);
    
    output.PositionV = viewPosition;
    output.PositionP = mul(viewPosition, Projection);
    float3x3 worldView =   World*View;
    float3x3 normalMatrix = transpose(inverse_mat3(worldView));
     output.Tangent = mul(input.Tangent , normalMatrix);
    
    output.Normal = mul(input.Normal, normalMatrix);
     
	return output;
}

PixelShaderOutput MainPS(VertexShaderOutput input) 
{
    PixelShaderOutput psOut = (PixelShaderOutput) 0;
   
    psOut.ViewPosition.xyz = input.PositionV.xyz *0.5+0.5 ;
    psOut.ViewPosition.a = (input.PositionP.z /50) ;
   
    psOut.ProjectionDepth.rgb = (input.PositionP.z / input.PositionP.w)*0.5+0.5;
    psOut.ProjectionDepth.a = 1;
    psOut.Normal = float4(normalize(input.Normal) * 0.5 + 0.5, 1);
    return psOut;

}

It seems that the width and height value of the small AO texture is 0.1 times the whole texture width and height, but I haven’t found any 0.1 mulplication mistakes in my code. Why?

If I scale the small AO texture to make it fit the size of the render target, it will look like that:


still looks glitchy and strange.