SSAO not correct with depthTexture

Recently I have been puzzling over this, as I have been trying to get THREE.DepthTexture to work with an ambient occlusion shader. I had it working before with RGBA depth unpacking, but after reading about Matt DesLauriers' project, Audiograph, I decided to try the method he describes for a potential boost in performance:

Historically in ThreeJS, you would render your scene with a MeshDepthMaterial to a WebGLRenderTarget, and then unpack to a linear depth value when sampling from the depth target. This is fairly expensive and often unnecessary, since many environments support the WEBGL_depth_texture extension.
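For contrast, here is a minimal sketch of that older packed-depth approach (illustrative only, not code from the question; the variable names are my own):

// Legacy approach: pack depth into an RGBA colour target, unpack in the shader.
const depthMaterial = new THREE.MeshDepthMaterial();
depthMaterial.depthPacking = THREE.RGBADepthPacking; // pack depth into RGBA8
const depthTarget = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight);

// Extra pass: draw the whole scene again with the depth material.
scene.overrideMaterial = depthMaterial;
renderer.render(scene, camera, depthTarget); // r86-style render-to-target signature
scene.overrideMaterial = null;

// The AO shader would then sample depthTarget.texture and unpack it,
// e.g. with unpackRGBAToDepth() from the <packing> shader chunk.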

After attempting this method, I somehow ended up with this odd, unwanted effect in which lines appear all over the terrain:

[Image: lines over terrain]

I have set up a small example below in which I have replicated the issue. I feel it is something fairly obvious that I am simply overlooking.

I am hoping somebody here can point out what I am missing, so that I can get ambient occlusion working in a more performant way.

Many thanks in advance.

const scene = new THREE.Scene();

const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 2000);

const pivot = new THREE.Object3D();
pivot.add(camera);
scene.add(pivot);

camera.position.set(0, 250, 500);
camera.lookAt(pivot.position);

const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.gammaInput = true;
renderer.gammaOutput = true;
renderer.gammaFactor = 2.2;

let supportsExtension = false;

if (renderer.extensions.get('WEBGL_depth_texture')) {
    supportsExtension = true;
}

document.body.appendChild(renderer.domElement);

const createCube = () => {
    const geo = new THREE.BoxGeometry(500, 500, 500);
    const mat = new THREE.MeshBasicMaterial({ color: 0x00ff00 });
    const obj = new THREE.Mesh(geo, mat);
    obj.position.y = -(obj.geometry.parameters.height / 2);
    scene.add(obj);
};

const createSphere = () => {
    const geo = new THREE.SphereGeometry(100, 12, 8);
    const mat = new THREE.MeshBasicMaterial({ color: 0xff00ff });
    const obj = new THREE.Mesh(geo, mat);
    obj.position.y = obj.geometry.parameters.radius;
    scene.add(obj);
};

// Create objects
createCube();
createSphere();

const composer = new THREE.EffectComposer(renderer);

// Render target with an attached depth texture (the WEBGL_depth_texture path)
const target = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight);
target.texture.format = THREE.RGBFormat;
target.texture.minFilter = THREE.NearestFilter;
target.texture.magFilter = THREE.NearestFilter;
target.texture.generateMipmaps = false;
target.stencilBuffer = false;
target.depthBuffer = true;
target.depthTexture = new THREE.DepthTexture();
target.depthTexture.type = THREE.UnsignedShortType;

function initPostProcessing() {
    composer.addPass(new THREE.RenderPass(scene, camera));

    const pass = new THREE.ShaderPass({
        uniforms: {
            "tDiffuse":     { value: null },
            "tDepth":       { value: target.depthTexture },
            "resolution":   { value: new THREE.Vector2(512, 512) },
            "cameraNear":   { value: 1 },
            "cameraFar":    { value: 100 },
            "onlyAO":       { value: 0 },
            "aoClamp":      { value: 0.5 },
            "lumInfluence": { value: 0.5 }
        },
        vertexShader: document.getElementById('vertexShader').textContent,
        fragmentShader: document.getElementById('fragmentShader').textContent,
    });
    pass.material.precision = 'highp';
    composer.addPass(pass);

    // ShaderPass clones the uniforms above, so assign the live values on the pass itself
    pass.uniforms.tDepth.value = target.depthTexture;
    pass.uniforms.cameraNear.value = camera.near;
    pass.uniforms.cameraFar.value = camera.far;

    composer.passes[composer.passes.length - 1].renderToScreen = true;
}

initPostProcessing();

const animate = () => {
    requestAnimationFrame(animate);

    pivot.rotation.y += 0.01;

    // Render the scene into the target first so the depth texture is populated
    renderer.render(scene, camera, target);
    composer.render();
};

animate();
html, body { margin: 0; }
canvas { display: block; width: 100%; height: 100%; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/86/three.js"></script> 
 
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/EffectComposer.js"></script> 
 
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/RenderPass.js"></script> 
 
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/ShaderPass.js"></script> 
 
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/shaders/CopyShader.js"></script> 
 

 
<script id="vertexShader" type="x-shader/x-vertex"> 
 
\t varying vec2 vUv; 
 
    
 
    void main() { 
 
    \t 
 
    vUv = uv; 
 
    
 
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0); 
 
    
 
    } 
 
</script> 
 
<script id="fragmentShader" type="x-shader/x-fragment"> 
 
uniform float cameraNear; 
 
uniform float cameraFar; 
 

 
uniform bool onlyAO;  // use only ambient occlusion pass? 
 

 
uniform vec2 resolution;  // texture width, height 
 
uniform float aoClamp; // depth clamp - reduces haloing at screen edges 
 

 
uniform float lumInfluence; // how much luminance affects occlusion 
 

 
uniform sampler2D tDiffuse; 
 
uniform highp sampler2D tDepth; 
 

 
varying vec2 vUv; 
 

 
// #define PI 3.14159265 
 
#define DL 2.399963229728653 // PI * (3.0 - sqrt(5.0)) 
 
#define EULER 2.718281828459045 
 

 
// user variables 
 

 
const int samples = 4;  // ao sample count 
 
const float radius = 5.0; // ao radius 
 
    
 
const bool useNoise = false;  // use noise instead of pattern for sample dithering 
 
const float noiseAmount = 0.0003; // dithering amount 
 

 
const float diffArea = 0.4; // self-shadowing reduction 
 
const float gDisplace = 0.4; // gauss bell center 
 

 
highp vec2 rand(const vec2 coord) { 
 

 
    highp vec2 noise; 
 

 
    if (useNoise) { 
 

 
     float nx = dot (coord, vec2(12.9898, 78.233)); 
 
     float ny = dot (coord, vec2(12.9898, 78.233) * 2.0); 
 

 
     noise = clamp(fract (43758.5453 * sin(vec2(nx, ny))), 0.0, 1.0); 
 

 
    } else { 
 

 
     highp float ff = fract(1.0 - coord.s * (resolution.x/2.0)); 
 
     highp float gg = fract(coord.t * (resolution.y/2.0)); 
 

 
     noise = vec2(0.25, 0.75) * vec2(ff) + vec2(0.75, 0.25) * gg; 
 

 
    } 
 

 
    return (noise * 2.0 - 1.0) * noiseAmount; 
 

 
    } 
 

 
\t float readDepth(const in vec2 coord) { 
 

 
    \t float cameraFarPlusNear = cameraFar + cameraNear; 
 
\t \t float cameraFarMinusNear = cameraFar - cameraNear; 
 
\t \t float cameraCoef = 2.0 * cameraNear; 
 

 
    \t return cameraCoef/(cameraFarPlusNear - texture2D(tDepth, coord).x * cameraFarMinusNear); 
 

 
\t } 
 

 
    float compareDepths(const in float depth1, const in float depth2, inout int far) { 
 

 
    float garea = 2.0;       // gauss bell width 
 
    float diff = (depth1 - depth2) * 100.0; // depth difference (0-100) 
 

 
    // reduce left bell width to avoid self-shadowing 
 

 
    if (diff < gDisplace) { 
 

 
     garea = diffArea; 
 

 
    } else { 
 

 
     far = 1; 
 

 
    } 
 

 
     float dd = diff - gDisplace; 
 
     float gauss = pow(EULER, -2.0 * dd * dd/(garea * garea)); 
 
     return gauss; 
 

 
    } 
 

 
    float calcAO(float depth, float dw, float dh) { 
 

 
     float dd = radius - depth * radius; 
 
     vec2 vv = vec2(dw, dh); 
 

 
     vec2 coord1 = vUv + dd * vv; 
 
     vec2 coord2 = vUv - dd * vv; 
 

 
     float temp1 = 0.0; 
 
     float temp2 = 0.0; 
 

 
     int far = 0; 
 
     temp1 = compareDepths(depth, readDepth(coord1), far); 
 

 
     // DEPTH EXTRAPOLATION 
 

 
     if (far > 0) { 
 

 
     temp2 = compareDepths(readDepth(coord2), depth, far); 
 
     temp1 += (1.0 - temp1) * temp2; 
 

 
     } 
 

 
     return temp1; 
 

 
    } 
 

 
    void main() { 
 

 
     highp vec2 noise = rand(vUv); 
 
     float depth = readDepth(vUv); 
 
     float tt = clamp(depth, aoClamp, 1.0); 
 

 
     float w = (1.0/resolution.x)/tt + (noise.x * (1.0 - noise.x)); 
 
     float h = (1.0/resolution.y)/tt + (noise.y * (1.0 - noise.y)); 
 

 
     float ao = 0.0; 
 

 
     float dz = 1.0/float(samples); 
 
     float z = 1.0 - dz/2.0; 
 
     float l = 0.0; 
 

 
     for (int i = 0; i <= samples; i ++) { 
 

 
     float r = sqrt(1.0 - z); 
 

 
     float pw = cos(l) * r; 
 
     float ph = sin(l) * r; 
 
     ao += calcAO(depth, pw * w, ph * h); 
 
     z = z - dz; 
 
     l = l + DL; 
 

 
     } 
 

 
     ao /= float(samples); 
 
     ao = 1.0 - ao; 
 

 
     vec3 color = texture2D(tDiffuse, vUv).rgb; 
 

 
     vec3 lumcoeff = vec3(0.299, 0.587, 0.114); 
 
     float lum = dot(color.rgb, lumcoeff); 
 
     vec3 luminance = vec3(lum); 
 

 
     vec3 final = vec3(color * mix(vec3(ao), vec3(1.0), luminance * lumInfluence)); // mix(color * ao, white, luminance) 
 
    \t \t \t \t float depth2 = readDepth(vUv); 
 
     
 
    if (onlyAO) { 
 

 
     final = vec3(mix(vec3(ao), vec3(1.0), luminance * lumInfluence)); // ambient occlusion only 
 

 
    } 
 
\t 
 
\t // gl_FragColor = vec4(vec3(readDepth(vUv)), 1.0); // Depth 
 
    gl_FragColor = vec4(final, 1.0); 
 

 
} 
 
</script>

I would love to hear what is causing my ambient occlusion to render incorrectly!


A reasonable guess is that your camera near plane is too small. – WestLangley


@WestLangley I feel silly now! Thank you very, very much; I was stumped. That seems to have done it: instead of 0.1 to 2000, I changed the range to 10 to 2000 and it looks fine now! I will keep testing to make sure everything is fixed. – Jason

Answer


If you are using a perspective camera and relying on the depth map for any purpose (that includes SSAO and shadows), be careful how you choose camera.near and camera.far, especially near. (If you are dealing with shadows, it would be shadow.camera.near.)

Push the near plane out as far as your use case will allow. You will get the best results if your scene is positioned near the front of the frustum.
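Applied to the example above, the fix is a one-line change plus a projection refresh; a minimal sketch (the 10/2000 values are the ones the asker settled on in the comments):

camera.near = 10; // was 0.1, which wastes nearly all depth precision up close
camera.far = 2000;
camera.updateProjectionMatrix(); // must be called after changing near/far

// The same advice applies if the depth map is used for shadows, e.g.:
// light.shadow.camera.near = 10;
// light.shadow.camera.updateProjectionMatrix();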

three.js r.86