import {
	CubeReflectionMapping,
	CubeRefractionMapping,
	CubeUVReflectionMapping,
	LinearEncoding,
	LinearFilter,
	NoToneMapping,
	NoBlending,
	RGBAFormat,
	HalfFloatType
} from '../constants.js';

import { BufferAttribute } from '../core/BufferAttribute.js';
import { BufferGeometry } from '../core/BufferGeometry.js';
import { Mesh } from '../objects/Mesh.js';
import { OrthographicCamera } from '../cameras/OrthographicCamera.js';
import { PerspectiveCamera } from '../cameras/PerspectiveCamera.js';
import { ShaderMaterial } from '../materials/ShaderMaterial.js';
import { Vector3 } from '../math/Vector3.js';
import { Color } from '../math/Color.js';
import { WebGLRenderTarget } from '../renderers/WebGLRenderTarget.js';
import { MeshBasicMaterial } from '../materials/MeshBasicMaterial.js';
import { BoxGeometry } from '../geometries/BoxGeometry.js';
import { BackSide } from '../constants.js';

const LOD_MIN = 4;

// The standard deviations (radians) associated with the extra mips. These are
// chosen to approximate a Trowbridge-Reitz distribution function times the
// geometric shadowing function. These sigma values squared must match the
// variance #defines in cube_uv_reflection_fragment.glsl.js.
const EXTRA_LOD_SIGMA = [ 0.125, 0.215, 0.35, 0.446, 0.526, 0.582 ];

// The maximum length of the blur for loop. Smaller sigmas will use fewer
// samples and exit early, but not recompile the shader.
const MAX_SAMPLES = 20;
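
// For reference: _halfBlur() below uses 1 + floor( 3 * sigmaPixels ) samples
// (three standard deviations), so MAX_SAMPLES caps the largest blur that can be
// represented without clipping.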

const _flatCamera = /*@__PURE__*/ new OrthographicCamera();
const _clearColor = /*@__PURE__*/ new Color();
let _oldTarget = null;

// Golden Ratio
const PHI = ( 1 + Math.sqrt( 5 ) ) / 2;
const INV_PHI = 1 / PHI;

// Vertices of a dodecahedron (except the opposites, which represent the
// same axis), used as axis directions evenly spread on a sphere.
const _axisDirections = [
	/*@__PURE__*/ new Vector3( 1, 1, 1 ),
	/*@__PURE__*/ new Vector3( - 1, 1, 1 ),
	/*@__PURE__*/ new Vector3( 1, 1, - 1 ),
	/*@__PURE__*/ new Vector3( - 1, 1, - 1 ),
	/*@__PURE__*/ new Vector3( 0, PHI, INV_PHI ),
	/*@__PURE__*/ new Vector3( 0, PHI, - INV_PHI ),
	/*@__PURE__*/ new Vector3( INV_PHI, 0, PHI ),
	/*@__PURE__*/ new Vector3( - INV_PHI, 0, PHI ),
	/*@__PURE__*/ new Vector3( PHI, INV_PHI, 0 ),
	/*@__PURE__*/ new Vector3( - PHI, INV_PHI, 0 ) ];

/**
 * This class generates a Prefiltered, Mipmapped Radiance Environment Map
 * (PMREM) from a cubeMap environment texture. This allows different levels of
 * blur to be quickly accessed based on material roughness. It is packed into a
 * special CubeUV format that allows us to perform custom interpolation so that
 * we can support nonlinear formats such as RGBE. Unlike a traditional mipmap
 * chain, it only goes down to the LOD_MIN level (above), and then creates extra
 * even more filtered 'mips' at the same LOD_MIN resolution, associated with
 * higher roughness levels. In this way we maintain resolution to smoothly
 * interpolate diffuse lighting while limiting sampling computation.
 *
 * Paper: Fast, Accurate Image-Based Lighting
 * https://drive.google.com/file/d/15y8r_UpKlU9SvV4ILb0C3qCPecS8pvLz/view
 */
class PMREMGenerator {

	constructor( renderer ) {

		this._renderer = renderer;
		this._pingPongRenderTarget = null;

		this._lodMax = 0;
		this._cubeSize = 0;
		this._lodPlanes = [];
		this._sizeLods = [];
		this._sigmas = [];

		this._blurMaterial = null;
		this._cubemapMaterial = null;
		this._equirectMaterial = null;

		this._compileMaterial( this._blurMaterial );

	}

	/**
	 * Generates a PMREM from a supplied Scene, which can be faster than using an
	 * image if networking bandwidth is low. Optional sigma specifies a blur radius
	 * in radians to be applied to the scene before PMREM generation. Optional near
	 * and far planes ensure the scene is rendered in its entirety (the cubeCamera
	 * is placed at the origin).
	 */
	fromScene( scene, sigma = 0, near = 0.1, far = 100 ) {

		_oldTarget = this._renderer.getRenderTarget();

		this._setSize( 256 );

		const cubeUVRenderTarget = this._allocateTargets();
		cubeUVRenderTarget.depthBuffer = true;

		this._sceneToCubeUV( scene, near, far, cubeUVRenderTarget );

		if ( sigma > 0 ) {

			this._blur( cubeUVRenderTarget, 0, 0, sigma );

		}

		this._applyPMREM( cubeUVRenderTarget );
		this._cleanup( cubeUVRenderTarget );

		return cubeUVRenderTarget;

	}
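
	// A minimal usage sketch (illustrative only, not part of the class API):
	// `renderer`, `scene` and `material` are assumed to exist in the host app.
	//
	//   const pmremGenerator = new PMREMGenerator( renderer );
	//   const envRenderTarget = pmremGenerator.fromScene( scene, 0.04 );
	//   material.envMap = envRenderTarget.texture;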

	/**
	 * Generates a PMREM from an equirectangular texture, which can be either LDR
	 * or HDR. The ideal input image size is 1k (1024 x 512),
	 * as this matches best with the 256 x 256 cubemap output.
	 */
	fromEquirectangular( equirectangular, renderTarget = null ) {

		return this._fromTexture( equirectangular, renderTarget );

	}
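
	// A minimal usage sketch (illustrative only): `renderer` is an existing
	// WebGLRenderer and `hdrTexture` an equirectangular texture, e.g. loaded
	// with RGBELoader.
	//
	//   const pmremGenerator = new PMREMGenerator( renderer );
	//   pmremGenerator.compileEquirectangularShader();
	//   scene.environment = pmremGenerator.fromEquirectangular( hdrTexture ).texture;
	//   hdrTexture.dispose();
	//   pmremGenerator.dispose();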

	/**
	 * Generates a PMREM from a cubemap texture, which can be either LDR
	 * or HDR. The ideal input cube size is 256 x 256,
	 * as this matches best with the 256 x 256 cubemap output.
	 */
	fromCubemap( cubemap, renderTarget = null ) {

		return this._fromTexture( cubemap, renderTarget );

	}

	/**
	 * Pre-compiles the cubemap shader. You can get faster start-up by invoking this method during
	 * your texture's network fetch for increased concurrency.
	 */
	compileCubemapShader() {

		if ( this._cubemapMaterial === null ) {

			this._cubemapMaterial = _getCubemapMaterial();
			this._compileMaterial( this._cubemapMaterial );

		}

	}

	/**
	 * Pre-compiles the equirectangular shader. You can get faster start-up by invoking this method during
	 * your texture's network fetch for increased concurrency.
	 */
	compileEquirectangularShader() {

		if ( this._equirectMaterial === null ) {

			this._equirectMaterial = _getEquirectMaterial();
			this._compileMaterial( this._equirectMaterial );

		}

	}

	/**
	 * Disposes of the PMREMGenerator's internal memory. Note that PMREMGenerator is a static class,
	 * so you should not need more than one PMREMGenerator object. If you do, calling dispose() on
	 * one of them will cause any others to also become unusable.
	 */
	dispose() {

		this._dispose();

		if ( this._cubemapMaterial !== null ) this._cubemapMaterial.dispose();
		if ( this._equirectMaterial !== null ) this._equirectMaterial.dispose();

	}
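
	// Note: dispose() only frees the generator's internal resources. Render targets
	// returned by fromScene() / fromEquirectangular() / fromCubemap() are owned by
	// the caller and must be disposed separately when no longer needed.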

	// private interface

	_setSize( cubeSize ) {

		this._lodMax = Math.floor( Math.log2( cubeSize ) );
		this._cubeSize = Math.pow( 2, this._lodMax );

	}

	_dispose() {

		if ( this._blurMaterial !== null ) this._blurMaterial.dispose();

		if ( this._pingPongRenderTarget !== null ) this._pingPongRenderTarget.dispose();

		for ( let i = 0; i < this._lodPlanes.length; i ++ ) {

			this._lodPlanes[ i ].dispose();

		}

	}

	_cleanup( outputTarget ) {

		this._renderer.setRenderTarget( _oldTarget );
		outputTarget.scissorTest = false;
		_setViewport( outputTarget, 0, 0, outputTarget.width, outputTarget.height );

	}

	_fromTexture( texture, renderTarget ) {

		if ( texture.mapping === CubeReflectionMapping || texture.mapping === CubeRefractionMapping ) {

			this._setSize( texture.image.length === 0 ? 16 : ( texture.image[ 0 ].width || texture.image[ 0 ].image.width ) );

		} else { // Equirectangular

			this._setSize( texture.image.width / 4 );

		}

		_oldTarget = this._renderer.getRenderTarget();

		const cubeUVRenderTarget = renderTarget || this._allocateTargets();
		this._textureToCubeUV( texture, cubeUVRenderTarget );
		this._applyPMREM( cubeUVRenderTarget );
		this._cleanup( cubeUVRenderTarget );

		return cubeUVRenderTarget;

	}

	_allocateTargets() {

		const width = 3 * Math.max( this._cubeSize, 16 * 7 );
		const height = 4 * this._cubeSize;
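
		// The atlas is three faces wide; 16 * 7 leaves room for the seven
		// LOD_MIN-sized mips (the base LOD_MIN level plus the six EXTRA_LOD_SIGMA
		// levels) packed side by side. Four cube-face heights are enough to stack
		// the full mip chain below the top-level 3 x 2 face grid.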

		const params = {
			magFilter: LinearFilter,
			minFilter: LinearFilter,
			generateMipmaps: false,
			type: HalfFloatType,
			format: RGBAFormat,
			encoding: LinearEncoding,
			depthBuffer: false
		};

		const cubeUVRenderTarget = _createRenderTarget( width, height, params );

		if ( this._pingPongRenderTarget === null || this._pingPongRenderTarget.width !== width ) {

			if ( this._pingPongRenderTarget !== null ) {

				this._dispose();

			}

			this._pingPongRenderTarget = _createRenderTarget( width, height, params );

			const { _lodMax } = this;
			( { sizeLods: this._sizeLods, lodPlanes: this._lodPlanes, sigmas: this._sigmas } = _createPlanes( _lodMax ) );

			this._blurMaterial = _getBlurShader( _lodMax, width, height );

		}

		return cubeUVRenderTarget;

	}

	_compileMaterial( material ) {

		const tmpMesh = new Mesh( this._lodPlanes[ 0 ], material );
		this._renderer.compile( tmpMesh, _flatCamera );

	}

	_sceneToCubeUV( scene, near, far, cubeUVRenderTarget ) {

		const fov = 90;
		const aspect = 1;
		const cubeCamera = new PerspectiveCamera( fov, aspect, near, far );
		const upSign = [ 1, - 1, 1, 1, 1, 1 ];
		const forwardSign = [ 1, 1, 1, - 1, - 1, - 1 ];
		const renderer = this._renderer;

		const originalAutoClear = renderer.autoClear;
		const toneMapping = renderer.toneMapping;
		renderer.getClearColor( _clearColor );

		renderer.toneMapping = NoToneMapping;
		renderer.autoClear = false;

		const backgroundMaterial = new MeshBasicMaterial( {
			name: 'PMREM.Background',
			side: BackSide,
			depthWrite: false,
			depthTest: false,
		} );

		const backgroundBox = new Mesh( new BoxGeometry(), backgroundMaterial );

		let useSolidColor = false;
		const background = scene.background;

		if ( background ) {

			if ( background.isColor ) {

				backgroundMaterial.color.copy( background );
				scene.background = null;
				useSolidColor = true;

			}

		} else {

			backgroundMaterial.color.copy( _clearColor );
			useSolidColor = true;

		}
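
		// The six cube faces are rendered into a 3 x 2 grid: the column ( i % 3 )
		// selects the x, y or z axis, and i > 2 selects the negative direction,
		// matching the face order used by getDirection() in the vertex shader below.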

		for ( let i = 0; i < 6; i ++ ) {

			const col = i % 3;

			if ( col === 0 ) {

				cubeCamera.up.set( 0, upSign[ i ], 0 );
				cubeCamera.lookAt( forwardSign[ i ], 0, 0 );

			} else if ( col === 1 ) {

				cubeCamera.up.set( 0, 0, upSign[ i ] );
				cubeCamera.lookAt( 0, forwardSign[ i ], 0 );

			} else {

				cubeCamera.up.set( 0, upSign[ i ], 0 );
				cubeCamera.lookAt( 0, 0, forwardSign[ i ] );

			}

			const size = this._cubeSize;

			_setViewport( cubeUVRenderTarget, col * size, i > 2 ? size : 0, size, size );

			renderer.setRenderTarget( cubeUVRenderTarget );

			if ( useSolidColor ) {

				renderer.render( backgroundBox, cubeCamera );

			}

			renderer.render( scene, cubeCamera );

		}

		backgroundBox.geometry.dispose();
		backgroundBox.material.dispose();

		renderer.toneMapping = toneMapping;
		renderer.autoClear = originalAutoClear;
		scene.background = background;

	}

	_textureToCubeUV( texture, cubeUVRenderTarget ) {

		const renderer = this._renderer;

		const isCubeTexture = ( texture.mapping === CubeReflectionMapping || texture.mapping === CubeRefractionMapping );

		if ( isCubeTexture ) {

			if ( this._cubemapMaterial === null ) {

				this._cubemapMaterial = _getCubemapMaterial();

			}

			this._cubemapMaterial.uniforms.flipEnvMap.value = ( texture.isRenderTargetTexture === false ) ? - 1 : 1;

		} else {

			if ( this._equirectMaterial === null ) {

				this._equirectMaterial = _getEquirectMaterial();

			}

		}

		const material = isCubeTexture ? this._cubemapMaterial : this._equirectMaterial;
		const mesh = new Mesh( this._lodPlanes[ 0 ], material );

		const uniforms = material.uniforms;

		uniforms[ 'envMap' ].value = texture;

		const size = this._cubeSize;

		_setViewport( cubeUVRenderTarget, 0, 0, 3 * size, 2 * size );

		renderer.setRenderTarget( cubeUVRenderTarget );
		renderer.render( mesh, _flatCamera );

	}

	_applyPMREM( cubeUVRenderTarget ) {

		const renderer = this._renderer;
		const autoClear = renderer.autoClear;
		renderer.autoClear = false;
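
		// Successive Gaussian blurs add in variance, so to take the previous mip
		// (already blurred by sigmas[ i - 1 ]) up to sigmas[ i ] we only need to
		// blur by the difference in quadrature.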

		for ( let i = 1; i < this._lodPlanes.length; i ++ ) {

			const sigma = Math.sqrt( this._sigmas[ i ] * this._sigmas[ i ] - this._sigmas[ i - 1 ] * this._sigmas[ i - 1 ] );

			const poleAxis = _axisDirections[ ( i - 1 ) % _axisDirections.length ];

			this._blur( cubeUVRenderTarget, i - 1, i, sigma, poleAxis );

		}

		renderer.autoClear = autoClear;

	}

	/**
	 * This is a two-pass Gaussian blur for a cubemap. Normally this is done
	 * vertically and horizontally, but this breaks down on a cube. Here we apply
	 * the blur latitudinally (around the poles), and then longitudinally (towards
	 * the poles) to approximate the orthogonally-separable blur. It is least
	 * accurate at the poles, but still does a decent job.
	 */
	_blur( cubeUVRenderTarget, lodIn, lodOut, sigma, poleAxis ) {

		const pingPongRenderTarget = this._pingPongRenderTarget;

		this._halfBlur(
			cubeUVRenderTarget,
			pingPongRenderTarget,
			lodIn,
			lodOut,
			sigma,
			'latitudinal',
			poleAxis );

		this._halfBlur(
			pingPongRenderTarget,
			cubeUVRenderTarget,
			lodOut,
			lodOut,
			sigma,
			'longitudinal',
			poleAxis );

	}

	_halfBlur( targetIn, targetOut, lodIn, lodOut, sigmaRadians, direction, poleAxis ) {

		const renderer = this._renderer;
		const blurMaterial = this._blurMaterial;

		if ( direction !== 'latitudinal' && direction !== 'longitudinal' ) {

			console.error(
				'blur direction must be either latitudinal or longitudinal!' );

		}

		// Number of standard deviations at which to cut off the discrete approximation.
		const STANDARD_DEVIATIONS = 3;

		const blurMesh = new Mesh( this._lodPlanes[ lodOut ], blurMaterial );
		const blurUniforms = blurMaterial.uniforms;

		const pixels = this._sizeLods[ lodIn ] - 1;
		const radiansPerPixel = isFinite( sigmaRadians ) ? Math.PI / ( 2 * pixels ) : 2 * Math.PI / ( 2 * MAX_SAMPLES - 1 );
		const sigmaPixels = sigmaRadians / radiansPerPixel;
		const samples = isFinite( sigmaRadians ) ? 1 + Math.floor( STANDARD_DEVIATIONS * sigmaPixels ) : MAX_SAMPLES;

		if ( samples > MAX_SAMPLES ) {

			console.warn( `sigmaRadians, ${
				sigmaRadians}, is too large and will clip, as it requested ${
				samples} samples when the maximum is set to ${MAX_SAMPLES}` );

		}
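
		// Build a one-sided, truncated Gaussian kernel: weight[ i ] = exp( - ( i / sigmaPixels )^2 / 2 ),
		// normalized so that the symmetric taps (the centre once, the rest mirrored) sum to one.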

		const weights = [];
		let sum = 0;

		for ( let i = 0; i < MAX_SAMPLES; ++ i ) {

			const x = i / sigmaPixels;
			const weight = Math.exp( - x * x / 2 );
			weights.push( weight );

			if ( i === 0 ) {

				sum += weight;

			} else if ( i < samples ) {

				sum += 2 * weight;

			}

		}

		for ( let i = 0; i < weights.length; i ++ ) {

			weights[ i ] = weights[ i ] / sum;

		}

		blurUniforms[ 'envMap' ].value = targetIn.texture;
		blurUniforms[ 'samples' ].value = samples;
		blurUniforms[ 'weights' ].value = weights;
		blurUniforms[ 'latitudinal' ].value = direction === 'latitudinal';

		if ( poleAxis ) {

			blurUniforms[ 'poleAxis' ].value = poleAxis;

		}

		const { _lodMax } = this;
		blurUniforms[ 'dTheta' ].value = radiansPerPixel;
		blurUniforms[ 'mipInt' ].value = _lodMax - lodIn;

		const outputSize = this._sizeLods[ lodOut ];
		const x = 3 * outputSize * ( lodOut > _lodMax - LOD_MIN ? lodOut - _lodMax + LOD_MIN : 0 );
		const y = 4 * ( this._cubeSize - outputSize );

		_setViewport( targetOut, x, y, 3 * outputSize, 2 * outputSize );
		renderer.setRenderTarget( targetOut );
		renderer.render( blurMesh, _flatCamera );

	}

}

function _createPlanes( lodMax ) {
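
	// Each entry in lodPlanes is a small BufferGeometry of six quads (36 vertices)
	// laid out directly in clip space as the 3 x 2 face grid for that mip level.
	// The per-vertex `faceIndex` attribute selects the cube face in the shaders,
	// and the UVs extend slightly past [ 0, 1 ] so filtering lines up across
	// face edges.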

	const lodPlanes = [];
	const sizeLods = [];
	const sigmas = [];

	let lod = lodMax;

	const totalLods = lodMax - LOD_MIN + 1 + EXTRA_LOD_SIGMA.length;

	for ( let i = 0; i < totalLods; i ++ ) {

		const sizeLod = Math.pow( 2, lod );
		sizeLods.push( sizeLod );
		let sigma = 1.0 / sizeLod;

		if ( i > lodMax - LOD_MIN ) {

			sigma = EXTRA_LOD_SIGMA[ i - lodMax + LOD_MIN - 1 ];

		} else if ( i === 0 ) {

			sigma = 0;

		}

		sigmas.push( sigma );

		const texelSize = 1.0 / ( sizeLod - 2 );
		const min = - texelSize;
		const max = 1 + texelSize;
		const uv1 = [ min, min, max, min, max, max, min, min, max, max, min, max ];

		const cubeFaces = 6;
		const vertices = 6;
		const positionSize = 3;
		const uvSize = 2;
		const faceIndexSize = 1;

		const position = new Float32Array( positionSize * vertices * cubeFaces );
		const uv = new Float32Array( uvSize * vertices * cubeFaces );
		const faceIndex = new Float32Array( faceIndexSize * vertices * cubeFaces );

		for ( let face = 0; face < cubeFaces; face ++ ) {

			const x = ( face % 3 ) * 2 / 3 - 1;
			const y = face > 2 ? 0 : - 1;
			const coordinates = [
				x, y, 0,
				x + 2 / 3, y, 0,
				x + 2 / 3, y + 1, 0,
				x, y, 0,
				x + 2 / 3, y + 1, 0,
				x, y + 1, 0
			];
			position.set( coordinates, positionSize * vertices * face );
			uv.set( uv1, uvSize * vertices * face );
			const fill = [ face, face, face, face, face, face ];
			faceIndex.set( fill, faceIndexSize * vertices * face );

		}

		const planes = new BufferGeometry();
		planes.setAttribute( 'position', new BufferAttribute( position, positionSize ) );
		planes.setAttribute( 'uv', new BufferAttribute( uv, uvSize ) );
		planes.setAttribute( 'faceIndex', new BufferAttribute( faceIndex, faceIndexSize ) );
		lodPlanes.push( planes );

		if ( lod > LOD_MIN ) {

			lod --;

		}

	}

	return { lodPlanes, sizeLods, sigmas };

}

function _createRenderTarget( width, height, params ) {

	const cubeUVRenderTarget = new WebGLRenderTarget( width, height, params );
	cubeUVRenderTarget.texture.mapping = CubeUVReflectionMapping;
	cubeUVRenderTarget.texture.name = 'PMREM.cubeUv';
	cubeUVRenderTarget.scissorTest = true;
	return cubeUVRenderTarget;

}

function _setViewport( target, x, y, width, height ) {

	target.viewport.set( x, y, width, height );
	target.scissor.set( x, y, width, height );

}

function _getBlurShader( lodMax, width, height ) {

	const weights = new Float32Array( MAX_SAMPLES );
	const poleAxis = new Vector3( 0, 1, 0 );
	const shaderMaterial = new ShaderMaterial( {

		name: 'SphericalGaussianBlur',

		defines: {
			'n': MAX_SAMPLES,
			'CUBEUV_TEXEL_WIDTH': 1.0 / width,
			'CUBEUV_TEXEL_HEIGHT': 1.0 / height,
			'CUBEUV_MAX_MIP': `${lodMax}.0`,
		},

		uniforms: {
			'envMap': { value: null },
			'samples': { value: 1 },
			'weights': { value: weights },
			'latitudinal': { value: false },
			'dTheta': { value: 0 },
			'mipInt': { value: 0 },
			'poleAxis': { value: poleAxis }
		},

		vertexShader: _getCommonVertexShader(),

		fragmentShader: /* glsl */`

			precision mediump float;
			precision mediump int;

			varying vec3 vOutputDirection;

			uniform sampler2D envMap;
			uniform int samples;
			uniform float weights[ n ];
			uniform bool latitudinal;
			uniform float dTheta;
			uniform float mipInt;
			uniform vec3 poleAxis;

			#define ENVMAP_TYPE_CUBE_UV
			#include <cube_uv_reflection_fragment>

			vec3 getSample( float theta, vec3 axis ) {

				float cosTheta = cos( theta );
				// Rodrigues' axis-angle rotation
				vec3 sampleDirection = vOutputDirection * cosTheta
					+ cross( axis, vOutputDirection ) * sin( theta )
					+ axis * dot( axis, vOutputDirection ) * ( 1.0 - cosTheta );

				return bilinearCubeUV( envMap, sampleDirection, mipInt );

			}

			void main() {

				vec3 axis = latitudinal ? poleAxis : cross( poleAxis, vOutputDirection );

				if ( all( equal( axis, vec3( 0.0 ) ) ) ) {

					axis = vec3( vOutputDirection.z, 0.0, - vOutputDirection.x );

				}

				axis = normalize( axis );

				gl_FragColor = vec4( 0.0, 0.0, 0.0, 1.0 );
				gl_FragColor.rgb += weights[ 0 ] * getSample( 0.0, axis );

				for ( int i = 1; i < n; i++ ) {

					if ( i >= samples ) {

						break;

					}

					float theta = dTheta * float( i );
					gl_FragColor.rgb += weights[ i ] * getSample( -1.0 * theta, axis );
					gl_FragColor.rgb += weights[ i ] * getSample( theta, axis );

				}

			}
		`,

		blending: NoBlending,
		depthTest: false,
		depthWrite: false

	} );

	return shaderMaterial;

}

function _getEquirectMaterial() {

	return new ShaderMaterial( {

		name: 'EquirectangularToCubeUV',

		uniforms: {
			'envMap': { value: null }
		},

		vertexShader: _getCommonVertexShader(),

		fragmentShader: /* glsl */`

			precision mediump float;
			precision mediump int;

			varying vec3 vOutputDirection;

			uniform sampler2D envMap;

			#include <common>

			void main() {

				vec3 outputDirection = normalize( vOutputDirection );
				vec2 uv = equirectUv( outputDirection );

				gl_FragColor = vec4( texture2D ( envMap, uv ).rgb, 1.0 );

			}
		`,

		blending: NoBlending,
		depthTest: false,
		depthWrite: false

	} );

}

function _getCubemapMaterial() {

	return new ShaderMaterial( {

		name: 'CubemapToCubeUV',

		uniforms: {
			'envMap': { value: null },
			'flipEnvMap': { value: - 1 }
		},

		vertexShader: _getCommonVertexShader(),

		fragmentShader: /* glsl */`

			precision mediump float;
			precision mediump int;

			uniform float flipEnvMap;

			varying vec3 vOutputDirection;

			uniform samplerCube envMap;

			void main() {

				gl_FragColor = textureCube( envMap, vec3( flipEnvMap * vOutputDirection.x, vOutputDirection.yz ) );

			}
		`,

		blending: NoBlending,
		depthTest: false,
		depthWrite: false

	} );

}

function _getCommonVertexShader() {

	return /* glsl */`

		precision mediump float;
		precision mediump int;

		attribute float faceIndex;

		varying vec3 vOutputDirection;

		// RH coordinate system; PMREM face-indexing convention
		vec3 getDirection( vec2 uv, float face ) {

			uv = 2.0 * uv - 1.0;

			vec3 direction = vec3( uv, 1.0 );

			if ( face == 0.0 ) {

				direction = direction.zyx; // ( 1, v, u ) pos x

			} else if ( face == 1.0 ) {

				direction = direction.xzy;
				direction.xz *= -1.0; // ( -u, 1, -v ) pos y

			} else if ( face == 2.0 ) {

				direction.x *= -1.0; // ( -u, v, 1 ) pos z

			} else if ( face == 3.0 ) {

				direction = direction.zyx;
				direction.xz *= -1.0; // ( -1, v, -u ) neg x

			} else if ( face == 4.0 ) {

				direction = direction.xzy;
				direction.xy *= -1.0; // ( -u, -1, v ) neg y

			} else if ( face == 5.0 ) {

				direction.z *= -1.0; // ( u, v, -1 ) neg z

			}

			return direction;

		}

		void main() {

			vOutputDirection = getDirection( uv, faceIndex );
			gl_Position = vec4( position, 1.0 );

		}
	`;

}

export { PMREMGenerator };