DirectX11 With Windows SDK Tutorial Index: http://www.cnblogs.com/X-Jun/p/9028764.html

Since later projects will need a decent demo environment, this chapter starts with the topic of cameras. Before diving in, it is worth reviewing the world matrix and the view matrix.
Welcome to join the QQ group: 727623616, where you can discuss DX11 and report any problems you run into.
Given an object's position \(\mathbf{Q} = (Q_{x}, Q_{y}, Q_{z})\) and three mutually perpendicular axes \(\mathbf{u} = (u_{x}, u_{y}, u_{z})\), \(\mathbf{v} = (v_{x}, v_{y}, v_{z})\), \(\mathbf{w} = (w_{x}, w_{y}, w_{z})\), the corresponding world matrix is:
\[ \mathbf{W}=\begin{bmatrix} u_{x} & u_{y} & u_{z} & 0 \\ v_{x} & v_{y} & v_{z} & 0 \\ w_{x} & w_{y} & w_{z} & 0 \\ Q_{x} & Q_{y} & Q_{z} & 1 \end{bmatrix}\]
This matrix can be interpreted in two ways: it transforms coordinates from the object's local space into world space, or equivalently it describes the object's local frame, its axes and origin, expressed in world coordinates.
What we need now, however, is to go from world space to view space. If the camera is treated as an object, this is effectively the inverse of its world transform, taking us from world coordinates into the camera's local frame (right direction = X axis, up direction = Y axis, look direction = Z axis), i.e. \(\mathbf{V}=\mathbf{(RT)}^{-1}=\mathbf{T}^{-1}\mathbf{R}^{-1}=\mathbf{T}^{-1}\mathbf{R}^{T}\):
\[ \mathbf{V}=\begin{bmatrix} u_{x} & v_{x} & w_{x} & 0 \\ u_{y} & v_{y} & w_{y} & 0 \\ u_{z} & v_{z} & w_{z} & 0 \\ -\mathbf{Q}\cdot\mathbf{u} & -\mathbf{Q}\cdot\mathbf{v} & -\mathbf{Q}\cdot\mathbf{w} & 1 \end{bmatrix}\]
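As a quick check of this formula, here is a minimal sketch (the `BuildViewFromAxes` helper is hypothetical, not part of the tutorial code): for orthonormal axes, the matrix built by hand from \(\mathbf{Q}, \mathbf{u}, \mathbf{v}, \mathbf{w}\) should agree with DirectXMath's `XMMatrixLookToLH(Q, w, v)` up to floating-point error.

```cpp
#include <DirectXMath.h>
using namespace DirectX;

// Build the view matrix directly from the camera position Q and its
// right (u), up (v), and look (w) axes, exactly as in the formula above.
XMMATRIX BuildViewFromAxes(FXMVECTOR Q, FXMVECTOR u, FXMVECTOR v, FXMVECTOR w)
{
    float x = -XMVectorGetX(XMVector3Dot(Q, u));
    float y = -XMVectorGetX(XMVector3Dot(Q, v));
    float z = -XMVectorGetX(XMVector3Dot(Q, w));
    XMFLOAT4X4 V(
        XMVectorGetX(u), XMVectorGetX(v), XMVectorGetX(w), 0.0f,
        XMVectorGetY(u), XMVectorGetY(v), XMVectorGetY(w), 0.0f,
        XMVectorGetZ(u), XMVectorGetZ(v), XMVectorGetZ(w), 0.0f,
        x,               y,               z,               1.0f);
    return XMLoadFloat4x4(&V);
}
// For orthonormal u, v, w this matches XMMatrixLookToLH(Q, w, v).
```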
The first-person/free camera and the third-person camera share part of their members, so the common parts are factored out here into an abstract camera base class. The `Camera` class is defined as follows:
```cpp
class Camera
{
public:
    Camera();
    virtual ~Camera() = 0;

    // Get the camera position
    DirectX::XMVECTOR GetPositionXM() const;
    DirectX::XMFLOAT3 GetPosition() const;

    // Get the camera's axis vectors
    DirectX::XMVECTOR GetRightXM() const;
    DirectX::XMFLOAT3 GetRight() const;
    DirectX::XMVECTOR GetUpXM() const;
    DirectX::XMFLOAT3 GetUp() const;
    DirectX::XMVECTOR GetLookXM() const;
    DirectX::XMFLOAT3 GetLook() const;

    // Get frustum information
    float GetNearWindowWidth() const;
    float GetNearWindowHeight() const;
    float GetFarWindowWidth() const;
    float GetFarWindowHeight() const;

    // Get matrices
    DirectX::XMMATRIX GetViewXM() const;
    DirectX::XMMATRIX GetProjXM() const;
    DirectX::XMMATRIX GetViewProjXM() const;

    // Get the viewport
    D3D11_VIEWPORT GetViewPort() const;

    // Set the frustum
    void SetFrustum(float fovY, float aspect, float nearZ, float farZ);

    // Set the viewport
    void SetViewPort(const D3D11_VIEWPORT& viewPort);
    void SetViewPort(float topLeftX, float topLeftY, float width, float height,
        float minDepth = 0.0f, float maxDepth = 1.0f);

    // Update the view matrix
    virtual void UpdateViewMatrix() = 0;

protected:
    // The camera's view-space frame expressed in world coordinates
    DirectX::XMFLOAT3 m_Position;
    DirectX::XMFLOAT3 m_Right;
    DirectX::XMFLOAT3 m_Up;
    DirectX::XMFLOAT3 m_Look;

    // Frustum properties
    float m_NearZ;
    float m_FarZ;
    float m_Aspect;
    float m_FovY;
    float m_NearWindowHeight;
    float m_FarWindowHeight;

    // View matrix and perspective projection matrix
    DirectX::XMFLOAT4X4 m_View;
    DirectX::XMFLOAT4X4 m_Proj;

    // Current viewport
    D3D11_VIEWPORT m_ViewPort;
};
```
As you can see, every camera, whatever its type, needs a view matrix, a projection matrix, and the related information required to set up those two transforms. The view-matrix update is a pure virtual method because the first-person/free camera implements it differently from the third-person camera.
Only the frustum-information getters are listed here:
```cpp
float Camera::GetNearWindowWidth() const
{
    return m_Aspect * m_NearWindowHeight;
}

float Camera::GetNearWindowHeight() const
{
    return m_NearWindowHeight;
}

float Camera::GetFarWindowWidth() const
{
    return m_Aspect * m_FarWindowHeight;
}

float Camera::GetFarWindowHeight() const
{
    return m_FarWindowHeight;
}
```
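The post does not list `Camera::SetFrustum`. A sketch consistent with the getters above would derive the near/far window heights from the vertical field of view; this is an assumption about the implementation, not the author's listing:

```cpp
// Sketch only: how SetFrustum could fill in the members used by the getters.
void Camera::SetFrustum(float fovY, float aspect, float nearZ, float farZ)
{
    m_FovY = fovY;
    m_Aspect = aspect;
    m_NearZ = nearZ;
    m_FarZ = farZ;
    // Height of the near/far plane "windows" follows from the vertical FOV
    m_NearWindowHeight = 2.0f * m_NearZ * tanf(0.5f * m_FovY);
    m_FarWindowHeight  = 2.0f * m_FarZ * tanf(0.5f * m_FovY);

    XMStoreFloat4x4(&m_Proj, XMMatrixPerspectiveFovLH(m_FovY, m_Aspect, m_NearZ, m_FarZ));
}
```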
The `FirstPersonCamera` class is defined as follows:
```cpp
class FirstPersonCamera : public Camera
{
public:
    FirstPersonCamera();
    ~FirstPersonCamera() override;

    // Set the camera position
    void SetPosition(float x, float y, float z);
    void SetPosition(const DirectX::XMFLOAT3& v);
    // Set the camera orientation
    void XM_CALLCONV LookAt(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR target, DirectX::FXMVECTOR up);
    void LookAt(const DirectX::XMFLOAT3& pos, const DirectX::XMFLOAT3& target, const DirectX::XMFLOAT3& up);
    void XM_CALLCONV LookTo(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR to, DirectX::FXMVECTOR up);
    void LookTo(const DirectX::XMFLOAT3& pos, const DirectX::XMFLOAT3& to, const DirectX::XMFLOAT3& up);
    // Strafe left/right
    void Strafe(float d);
    // Walk (movement constrained to the ground plane)
    void Walk(float d);
    // Move forward (along the look direction)
    void MoveForward(float d);
    // Look up/down
    void Pitch(float rad);
    // Look left/right
    void RotateY(float rad);
    // Update the view matrix
    void UpdateViewMatrix() override;
};
```
This first-person camera does not implement collision detection; its capabilities are exactly the operations declared above: setting the position and orientation, strafing, walking, moving forward, and looking up/down and left/right. The implementation is as follows:
```cpp
void FirstPersonCamera::SetPosition(float x, float y, float z)
{
    SetPosition(XMFLOAT3(x, y, z));
}

void FirstPersonCamera::SetPosition(const DirectX::XMFLOAT3& v)
{
    m_Position = v;
}

void XM_CALLCONV FirstPersonCamera::LookAt(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR target, DirectX::FXMVECTOR up)
{
    LookTo(pos, target - pos, up);
}

void FirstPersonCamera::LookAt(const DirectX::XMFLOAT3& pos, const DirectX::XMFLOAT3& target, const DirectX::XMFLOAT3& up)
{
    LookAt(XMLoadFloat3(&pos), XMLoadFloat3(&target), XMLoadFloat3(&up));
}

void XM_CALLCONV FirstPersonCamera::LookTo(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR to, DirectX::FXMVECTOR up)
{
    XMVECTOR L = XMVector3Normalize(to);
    XMVECTOR R = XMVector3Normalize(XMVector3Cross(up, L));
    XMVECTOR U = XMVector3Cross(L, R);

    XMStoreFloat3(&m_Position, pos);
    XMStoreFloat3(&m_Look, L);
    XMStoreFloat3(&m_Right, R);
    XMStoreFloat3(&m_Up, U);
}

void FirstPersonCamera::LookTo(const DirectX::XMFLOAT3& pos, const DirectX::XMFLOAT3& to, const DirectX::XMFLOAT3& up)
{
    LookTo(XMLoadFloat3(&pos), XMLoadFloat3(&to), XMLoadFloat3(&up));
}

void FirstPersonCamera::Strafe(float d)
{
    XMVECTOR Pos = XMLoadFloat3(&m_Position);
    XMVECTOR Right = XMLoadFloat3(&m_Right);
    XMVECTOR Dist = XMVectorReplicate(d);
    // DestPos = Dist * Right + SrcPos
    XMStoreFloat3(&m_Position, XMVectorMultiplyAdd(Dist, Right, Pos));
}

void FirstPersonCamera::Walk(float d)
{
    XMVECTOR Pos = XMLoadFloat3(&m_Position);
    XMVECTOR Right = XMLoadFloat3(&m_Right);
    XMVECTOR Up = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
    XMVECTOR Front = XMVector3Normalize(XMVector3Cross(Right, Up));
    XMVECTOR Dist = XMVectorReplicate(d);
    // DestPos = Dist * Front + SrcPos
    XMStoreFloat3(&m_Position, XMVectorMultiplyAdd(Dist, Front, Pos));
}

void FirstPersonCamera::MoveForward(float d)
{
    XMVECTOR Pos = XMLoadFloat3(&m_Position);
    XMVECTOR Look = XMLoadFloat3(&m_Look);
    XMVECTOR Dist = XMVectorReplicate(d);
    // DestPos = Dist * Look + SrcPos
    XMStoreFloat3(&m_Position, XMVectorMultiplyAdd(Dist, Look, Pos));
}

void FirstPersonCamera::Pitch(float rad)
{
    XMMATRIX R = XMMatrixRotationAxis(XMLoadFloat3(&m_Right), rad);
    XMVECTOR Up = XMVector3TransformNormal(XMLoadFloat3(&m_Up), R);
    XMVECTOR Look = XMVector3TransformNormal(XMLoadFloat3(&m_Look), R);
    float cosPhi = XMVectorGetY(Look);
    // Restrict the vertical viewing angle Phi to [2pi/9, 7pi/9],
    // i.e. its cosine to [-cos(2pi/9), cos(2pi/9)]
    if (fabs(cosPhi) > cosf(XM_2PI / 9))
        return;

    XMStoreFloat3(&m_Up, Up);
    XMStoreFloat3(&m_Look, Look);
}

void FirstPersonCamera::RotateY(float rad)
{
    XMMATRIX R = XMMatrixRotationY(rad);
    XMStoreFloat3(&m_Right, XMVector3TransformNormal(XMLoadFloat3(&m_Right), R));
    XMStoreFloat3(&m_Up, XMVector3TransformNormal(XMLoadFloat3(&m_Up), R));
    XMStoreFloat3(&m_Look, XMVector3TransformNormal(XMLoadFloat3(&m_Look), R));
}
```
The vertical viewing angle Phi and the Y component of the look axis are related by:

\[L_{y} = \cos(\Phi)\]

When Phi is 0 radians the camera looks straight up, and when Phi is pi radians it looks straight down. In this demo the viewing angle Phi is restricted to the range [2pi/9, 7pi/9] radians.
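In terms of the look vector's Y component, the clamp used in `Pitch` above is equivalent to

\[ |L_{y}| = |\cos(\Phi)| \leq \cos\left(\frac{2\pi}{9}\right) \approx 0.766 \]

so the look direction always stays at least 40° away from straight up and straight down.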
The `FirstPersonCamera::UpdateViewMatrix` method first re-normalizes and re-orthogonalizes the camera's right, up, and look axes, then computes the remaining elements to fill in the view matrix:
```cpp
void FirstPersonCamera::UpdateViewMatrix()
{
    XMVECTOR R = XMLoadFloat3(&m_Right);
    XMVECTOR U = XMLoadFloat3(&m_Up);
    XMVECTOR L = XMLoadFloat3(&m_Look);
    XMVECTOR P = XMLoadFloat3(&m_Position);

    // Keep the camera axes orthonormal, with unit length
    L = XMVector3Normalize(L);
    U = XMVector3Normalize(XMVector3Cross(L, R));
    // U and L are now orthonormal; their cross product gives R
    R = XMVector3Cross(U, L);

    // Fill in the view matrix
    float x = -XMVectorGetX(XMVector3Dot(P, R));
    float y = -XMVectorGetX(XMVector3Dot(P, U));
    float z = -XMVectorGetX(XMVector3Dot(P, L));

    XMStoreFloat3(&m_Right, R);
    XMStoreFloat3(&m_Up, U);
    XMStoreFloat3(&m_Look, L);

    m_View = {
        m_Right.x, m_Up.x, m_Look.x, 0.0f,
        m_Right.y, m_Up.y, m_Look.y, 0.0f,
        m_Right.z, m_Up.z, m_Look.z, 0.0f,
        x, y, z, 1.0f
    };
}
```
The `ThirdPersonCamera` class is defined as follows:
```cpp
class ThirdPersonCamera : public Camera
{
public:
    ThirdPersonCamera();
    ~ThirdPersonCamera() override;

    // Get the position of the tracked object
    DirectX::XMFLOAT3 GetTargetPosition() const;
    // Get the distance to the object
    float GetDistance() const;
    // Get the rotation about the X axis
    float GetRotationX() const;
    // Get the rotation about the Y axis
    float GetRotationY() const;
    // Rotate vertically around the object (the vertical angle Phi is clamped to [pi/6, pi/2])
    void RotateX(float rad);
    // Rotate horizontally around the object
    void RotateY(float rad);
    // Move closer to / away from the object
    void Approach(float dist);
    // Set the initial rotation about the X axis (Phi is clamped to [pi/6, pi/2])
    void SetRotationX(float phi);
    // Set the initial rotation about the Y axis
    void SetRotationY(float theta);
    // Set and bind the position of the object to track
    void SetTarget(const DirectX::XMFLOAT3& target);
    // Set the initial distance
    void SetDistance(float dist);
    // Set the minimum and maximum allowed distance
    void SetDistanceMinMax(float minDist, float maxDist);
    // Update the view matrix
    void UpdateViewMatrix() override;

private:
    DirectX::XMFLOAT3 m_Target;
    float m_Distance;
    // Minimum and maximum allowed distance
    float m_MinDist, m_MaxDist;
    // Current rotation angles, relative to the world coordinate system
    float m_Theta;
    float m_Phi;
};
```
This third-person camera likewise does not implement collision detection; its capabilities are the operations declared above: tracking a target and rotating around it, plus zooming in and out. Part of the implementation is as follows:
```cpp
void ThirdPersonCamera::RotateX(float rad)
{
    m_Phi -= rad;
    // Clamp the vertical angle Phi to [pi/6, pi/2],
    // i.e. its cosine to [0, cos(pi/6)]
    if (m_Phi < XM_PI / 6)
        m_Phi = XM_PI / 6;
    else if (m_Phi > XM_PIDIV2)
        m_Phi = XM_PIDIV2;
}

void ThirdPersonCamera::RotateY(float rad)
{
    m_Theta = XMScalarModAngle(m_Theta - rad);
}

void ThirdPersonCamera::Approach(float dist)
{
    m_Distance += dist;
    // Clamp the distance to [m_MinDist, m_MaxDist]
    if (m_Distance < m_MinDist)
        m_Distance = m_MinDist;
    else if (m_Distance > m_MaxDist)
        m_Distance = m_MaxDist;
}

void ThirdPersonCamera::SetRotationX(float phi)
{
    m_Phi = XMScalarModAngle(phi);
    // Clamp the vertical angle Phi to [pi/6, pi/2],
    // i.e. its cosine to [0, cos(pi/6)]
    if (m_Phi < XM_PI / 6)
        m_Phi = XM_PI / 6;
    else if (m_Phi > XM_PIDIV2)
        m_Phi = XM_PIDIV2;
}

void ThirdPersonCamera::SetRotationY(float theta)
{
    m_Theta = XMScalarModAngle(theta);
}

void ThirdPersonCamera::SetTarget(const DirectX::XMFLOAT3& target)
{
    m_Target = target;
}

void ThirdPersonCamera::SetDistance(float dist)
{
    m_Distance = dist;
}

void ThirdPersonCamera::SetDistanceMinMax(float minDist, float maxDist)
{
    m_MinDist = minDist;
    m_MaxDist = maxDist;
}
```
To place the camera at a specific spot behind the object, one option is to compute the camera position with the formula below
\[\mathbf{Q} = \mathbf{T} - dist * \mathbf{L} \]
and then obtain the view matrix with the XMMatrixLookAtLH function. At runtime, however, you will notice unpleasant jitter while rotating, because the camera position computed this way accumulates error. A camera position computed from spherical coordinates is much smoother, with no visible jitter.
For a right-handed coordinate system, the spherical-coordinate formulas are:
\[\begin{cases} x = Rsin(\phi)cos(\theta) \\ y = Rsin(\phi)sin(\theta) \\ z = Rcos(\phi) \end{cases} \]
For a left-handed coordinate system, they become:
\[\begin{cases} x = Rsin(\phi)cos(\theta) \\ z = Rsin(\phi)sin(\theta) \\ y = Rcos(\phi) \end{cases} \]
Finally, adding the object's position gives the camera position:
\[\begin{cases} Q_{x} = T_{x} + Rsin(\phi)cos(\theta) \\ Q_{z} = T_{z} + Rsin(\phi)sin(\theta) \\ Q_{y} = T_{y} + Rcos(\phi) \end{cases} \]
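As an illustration (the `SphericalToCartesian` helper is hypothetical, not code from the tutorial), the left-handed formula can be wrapped in a small function; with theta = -pi/2 and phi = pi/2 it places the camera at target + (0, 0, -R), directly behind the target along -Z.

```cpp
#include <DirectXMath.h>
using namespace DirectX;

// Left-handed spherical coordinates: R = distance, theta = horizontal angle,
// phi = angle measured down from the +Y axis.
XMFLOAT3 SphericalToCartesian(const XMFLOAT3& target, float R, float theta, float phi)
{
    return XMFLOAT3(
        target.x + R * sinf(phi) * cosf(theta),
        target.y + R * cosf(phi),
        target.z + R * sinf(phi) * sinf(theta));
}
```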
The `ThirdPersonCamera::UpdateViewMatrix` method first computes the camera position, then, as before, re-normalizes and re-orthogonalizes the right, up, and look axes, and finally computes the remaining elements to fill in the view matrix:
```cpp
void ThirdPersonCamera::UpdateViewMatrix()
{
    // Spherical coordinates
    float x = m_Target.x + m_Distance * sinf(m_Phi) * cosf(m_Theta);
    float z = m_Target.z + m_Distance * sinf(m_Phi) * sinf(m_Theta);
    float y = m_Target.y + m_Distance * cosf(m_Phi);
    m_Position = { x, y, z };
    XMVECTOR P = XMLoadFloat3(&m_Position);
    XMVECTOR L = XMVector3Normalize(XMLoadFloat3(&m_Target) - P);
    XMVECTOR R = XMVector3Normalize(XMVector3Cross(XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f), L));
    XMVECTOR U = XMVector3Cross(L, R);

    // Update the axis vectors
    XMStoreFloat3(&m_Right, R);
    XMStoreFloat3(&m_Up, U);
    XMStoreFloat3(&m_Look, L);

    m_View = {
        m_Right.x, m_Up.x, m_Look.x, 0.0f,
        m_Right.y, m_Up.y, m_Look.y, 0.0f,
        m_Right.z, m_Up.z, m_Look.z, 0.0f,
        -XMVectorGetX(XMVector3Dot(P, R)), -XMVectorGetX(XMVector3Dot(P, U)), -XMVectorGetX(XMVector3Dot(P, L)), 1.0f
    };
}
```
As the project grows, the constant buffers are updated more and more often, but every update has to rewrite the whole block. Refreshing an entire block just to change a single variable wastes performance, so the constant buffers are split into finer blocks by update frequency and category, so that each update touches as few unrelated variables as possible. The HLSL constant buffers therefore change as follows:
```hlsl
cbuffer CBChangesEveryDrawing : register(b0)
{
    matrix g_World;
    matrix g_WorldInvTranspose;
}

cbuffer CBChangesEveryFrame : register(b1)
{
    matrix g_View;
    float3 g_EyePosW;
}

cbuffer CBChangesOnResize : register(b2)
{
    matrix g_Proj;
}

cbuffer CBChangesRarely : register(b3)
{
    DirectionalLight g_DirLight[10];
    PointLight g_PointLight[10];
    SpotLight g_SpotLight[10];
    Material g_Material;
    int g_NumDirLight;
    int g_NumPointLight;
    int g_NumSpotLight;
}
```
The corresponding C++ structs are:
```cpp
struct CBChangesEveryDrawing
{
    DirectX::XMMATRIX world;
    DirectX::XMMATRIX worldInvTranspose;
};

struct CBChangesEveryFrame
{
    DirectX::XMMATRIX view;
    DirectX::XMFLOAT4 eyePos;
};

struct CBChangesOnResize
{
    DirectX::XMMATRIX proj;
};

struct CBChangesRarely
{
    DirectionalLight dirLight[10];
    PointLight pointLight[10];
    SpotLight spotLight[10];
    Material material;
    int numDirLight;
    int numPointLight;
    int numSpotLight;
    float pad;    // padding to keep the struct 16-byte aligned
};
```
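The C++ structs have to respect the 16-byte packing of HLSL constant buffers, which is why `eyePos` is an `XMFLOAT4` even though the shader declares a `float3`, and why `CBChangesRarely` carries a trailing `float pad`. A compile-time check like the following sketch can catch size mismatches early; the `static_assert`s are illustrative and not part of the tutorial code:

```cpp
// Every constant buffer struct mirrored on the C++ side should be a multiple
// of 16 bytes, matching HLSL register packing (and D3D11's requirement that
// a constant buffer's ByteWidth be a multiple of 16).
static_assert(sizeof(CBChangesEveryDrawing) % 16 == 0, "CB must be 16-byte aligned");
static_assert(sizeof(CBChangesEveryFrame)   % 16 == 0, "CB must be 16-byte aligned");
static_assert(sizeof(CBChangesOnResize)     % 16 == 0, "CB must be 16-byte aligned");
static_assert(sizeof(CBChangesRarely)       % 16 == 0, "CB must be 16-byte aligned");
```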
The update frequencies, from fast to slow, fall into four categories: per object drawn, per frame, on window resize, and (almost) never. Each variable is then placed in whichever block matches how often the current project actually changes it. This may put variables needed by different shaders into the same block, but since constant buffers can be bound to the shader stages once at initialization, that is not a real problem.
Since the number of objects in the scene is also growing, a `GameObject` class is implemented here to make managing each object as convenient as possible:
```cpp
class GameObject
{
public:
    GameObject();

    // Get the position
    DirectX::XMFLOAT3 GetPosition() const;
    // Set the vertex/index buffers
    template<class VertexType, class IndexType>
    void SetBuffer(ID3D11Device* device, const Geometry::MeshData<VertexType, IndexType>& meshData);
    // Set the texture
    void SetTexture(ID3D11ShaderResourceView* texture);
    // Set the world matrix
    void SetWorldMatrix(const DirectX::XMFLOAT4X4& world);
    void XM_CALLCONV SetWorldMatrix(DirectX::FXMMATRIX world);
    // Draw
    void Draw(ID3D11DeviceContext* deviceContext);
    // Set the debug object name
    // If the buffers are recreated, the debug name needs to be set again
    void SetDebugObjectName(const std::string& name);

private:
    DirectX::XMFLOAT4X4 m_WorldMatrix;              // World matrix
    ComPtr<ID3D11ShaderResourceView> m_pTexture;    // Texture
    ComPtr<ID3D11Buffer> m_pVertexBuffer;           // Vertex buffer
    ComPtr<ID3D11Buffer> m_pIndexBuffer;            // Index buffer
    UINT m_VertexStride;                            // Vertex stride in bytes
    UINT m_IndexCount;                              // Index count
};
```
For now, however, the `GameObject` class still depends on several constant buffers in the `GameApp` class; it will become independent in Chapter 13.
Note in particular that if you want to adjust an object dynamically, you can store its scale, rotation, and translation, modify those values every frame, and only then generate the world matrix, as the sketch below shows.
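For instance, a minimal sketch (the `ComposeWorldMatrix` helper and its parameters are hypothetical, not members of the project's `GameObject`):

```cpp
#include <DirectXMath.h>
using namespace DirectX;

// Rebuild a world matrix from stored scale / rotation / translation values.
// rotation holds pitch, yaw, roll in radians.
XMMATRIX ComposeWorldMatrix(const XMFLOAT3& scale, const XMFLOAT3& rotation, const XMFLOAT3& position)
{
    return XMMatrixScaling(scale.x, scale.y, scale.z) *
        XMMatrixRotationRollPitchYaw(rotation.x, rotation.y, rotation.z) *
        XMMatrixTranslation(position.x, position.y, position.z);
}
// Usage (illustrative): m_WoodCrate.SetWorldMatrix(ComposeWorldMatrix(scale, rotation, position));
```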
The vertex- and index-buffer creation that used to live in `GameApp::InitResource` has been moved into `GameObject::SetBuffer`:
```cpp
template<class VertexType, class IndexType>
void GameApp::GameObject::SetBuffer(ID3D11Device* device, const Geometry::MeshData<VertexType, IndexType>& meshData)
{
    // Release old resources
    m_pVertexBuffer.Reset();
    m_pIndexBuffer.Reset();

    // Describe the vertex buffer
    m_VertexStride = sizeof(VertexType);
    D3D11_BUFFER_DESC vbd;
    ZeroMemory(&vbd, sizeof(vbd));
    vbd.Usage = D3D11_USAGE_IMMUTABLE;
    vbd.ByteWidth = (UINT)meshData.vertexVec.size() * m_VertexStride;
    vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
    vbd.CPUAccessFlags = 0;
    // Create the vertex buffer
    D3D11_SUBRESOURCE_DATA InitData;
    ZeroMemory(&InitData, sizeof(InitData));
    InitData.pSysMem = meshData.vertexVec.data();
    HR(device->CreateBuffer(&vbd, &InitData, m_pVertexBuffer.GetAddressOf()));

    // Describe the index buffer
    m_IndexCount = (UINT)meshData.indexVec.size();
    D3D11_BUFFER_DESC ibd;
    ZeroMemory(&ibd, sizeof(ibd));
    ibd.Usage = D3D11_USAGE_IMMUTABLE;
    ibd.ByteWidth = m_IndexCount * sizeof(IndexType);
    ibd.BindFlags = D3D11_BIND_INDEX_BUFFER;
    ibd.CPUAccessFlags = 0;
    // Create the index buffer
    InitData.pSysMem = meshData.indexVec.data();
    HR(device->CreateBuffer(&ibd, &InitData, m_pIndexBuffer.GetAddressOf()));
}
```
The `ID3D11DeviceContext::XXGetConstantBuffers` family of methods retrieves the constant buffers currently bound to a shader stage, where XX can be VS, DS, CS, GS, HS, or PS, i.e. the vertex, domain, compute, geometry, hull, and pixel shader stages. Their parameters are essentially identical, so only those of `ID3D11DeviceContext::VSGetConstantBuffers` are listed here:
```cpp
void ID3D11DeviceContext::VSGetConstantBuffers(
    UINT StartSlot,                          // [In] starting slot index
    UINT NumBuffers,                         // [In] number of constant buffers
    ID3D11Buffer **ppConstantBuffers) = 0;   // [Out] array receiving the constant buffers
```
Finally, the `GameObject::Draw` method is as follows. Because it performs the transpose internally, the world matrix does not need to be pre-transposed when it is set from outside:
```cpp
void GameApp::GameObject::Draw(ID3D11DeviceContext* deviceContext)
{
    // Set the vertex/index buffers
    UINT strides = m_VertexStride;
    UINT offsets = 0;
    deviceContext->IASetVertexBuffers(0, 1, m_pVertexBuffer.GetAddressOf(), &strides, &offsets);
    deviceContext->IASetIndexBuffer(m_pIndexBuffer.Get(), DXGI_FORMAT_R16_UINT, 0);

    // Fetch the constant buffer already bound to the pipeline and modify it
    ComPtr<ID3D11Buffer> cBuffer = nullptr;
    deviceContext->VSGetConstantBuffers(0, 1, cBuffer.GetAddressOf());
    CBChangesEveryDrawing cbDrawing;

    // Transpose here so callers do not have to do it in advance
    XMMATRIX W = XMLoadFloat4x4(&m_WorldMatrix);
    cbDrawing.world = XMMatrixTranspose(W);
    cbDrawing.worldInvTranspose = XMMatrixInverse(nullptr, W);    // the two transposes cancel out

    // Update the constant buffer
    D3D11_MAPPED_SUBRESOURCE mappedData;
    HR(deviceContext->Map(cBuffer.Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
    memcpy_s(mappedData.pData, sizeof(CBChangesEveryDrawing), &cbDrawing, sizeof(CBChangesEveryDrawing));
    deviceContext->Unmap(cBuffer.Get(), 0);

    // Set the texture
    deviceContext->PSSetShaderResources(0, 1, m_pTexture.GetAddressOf());
    // Draw
    deviceContext->DrawIndexed(m_IndexCount, 0, 0);
}
```
This is where the constant buffer that must be refreshed for every draw call gets modified.
Since the camera owns the methods that set the frustum and viewport, and the projection matrix in the constant buffer needs to be refreshed on resize, that work is moved into `GameApp::OnResize`:
```cpp
void GameApp::OnResize()
{
    // Omitted...
    D3DApp::OnResize();
    // Omitted...

    // Let the camera respond to the new size
    if (m_pCamera != nullptr)
    {
        m_pCamera->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
        m_pCamera->SetViewPort(0.0f, 0.0f, (float)m_ClientWidth, (float)m_ClientHeight);
        m_CBOnResize.proj = XMMatrixTranspose(m_pCamera->GetProjXM());

        D3D11_MAPPED_SUBRESOURCE mappedData;
        HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[2].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
        memcpy_s(mappedData.pData, sizeof(CBChangesOnResize), &m_CBOnResize, sizeof(CBChangesOnResize));
        m_pd3dImmediateContext->Unmap(m_pConstantBuffers[2].Get(), 0);
    }
}
```
The `GameApp::InitResource` method creates three kinds of game objects: the walls, the floor, and the wooden crate. It then creates the constant buffers, and finally binds the required resources to the relevant pipeline stages. One directional light and one point light are set up here:
```cpp
bool GameApp::InitResource()
{
    // ******************
    // Describe the constant buffers
    D3D11_BUFFER_DESC cbd;
    ZeroMemory(&cbd, sizeof(cbd));
    cbd.Usage = D3D11_USAGE_DYNAMIC;
    cbd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
    cbd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
    // Create the constant buffers used by the VS and PS
    cbd.ByteWidth = sizeof(CBChangesEveryDrawing);
    HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[0].GetAddressOf()));
    cbd.ByteWidth = sizeof(CBChangesEveryFrame);
    HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[1].GetAddressOf()));
    cbd.ByteWidth = sizeof(CBChangesOnResize);
    HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[2].GetAddressOf()));
    cbd.ByteWidth = sizeof(CBChangesRarely);
    HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[3].GetAddressOf()));

    // ******************
    // Initialize the game objects
    ComPtr<ID3D11ShaderResourceView> texture;
    // Initialize the wooden crate
    HR(CreateDDSTextureFromFile(m_pd3dDevice.Get(), L"Texture\\WoodCrate.dds", nullptr, texture.GetAddressOf()));
    m_WoodCrate.SetBuffer(m_pd3dDevice.Get(), Geometry::CreateBox());
    m_WoodCrate.SetTexture(texture.Get());
    // Initialize the floor
    HR(CreateDDSTextureFromFile(m_pd3dDevice.Get(), L"Texture\\floor.dds", nullptr, texture.ReleaseAndGetAddressOf()));
    m_Floor.SetBuffer(m_pd3dDevice.Get(),
        Geometry::CreatePlane(XMFLOAT3(0.0f, -1.0f, 0.0f), XMFLOAT2(20.0f, 20.0f), XMFLOAT2(5.0f, 5.0f)));
    m_Floor.SetTexture(texture.Get());
    // Initialize the walls
    m_Walls.resize(4);
    HR(CreateDDSTextureFromFile(m_pd3dDevice.Get(), L"Texture\\brick.dds", nullptr, texture.ReleaseAndGetAddressOf()));
    // Generate the four wall faces
    for (int i = 0; i < 4; ++i)
    {
        m_Walls[i].SetBuffer(m_pd3dDevice.Get(),
            Geometry::CreatePlane(XMFLOAT3(), XMFLOAT2(20.0f, 8.0f), XMFLOAT2(5.0f, 1.5f)));
        XMMATRIX world = XMMatrixRotationX(-XM_PIDIV2) * XMMatrixRotationY(XM_PIDIV2 * i) *
            XMMatrixTranslation(i % 2 ? -10.0f * (i - 2) : 0.0f, 3.0f, i % 2 == 0 ? -10.0f * (i - 1) : 0.0f);
        m_Walls[i].SetWorldMatrix(world);
        m_Walls[i].SetTexture(texture.Get());
    }

    // Initialize the sampler state
    D3D11_SAMPLER_DESC sampDesc;
    ZeroMemory(&sampDesc, sizeof(sampDesc));
    sampDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
    sampDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
    sampDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
    sampDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
    sampDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
    sampDesc.MinLOD = 0;
    sampDesc.MaxLOD = D3D11_FLOAT32_MAX;
    HR(m_pd3dDevice->CreateSamplerState(&sampDesc, m_pSamplerState.GetAddressOf()));

    // ******************
    // Initialize the constant buffer values

    // Values that may change every frame
    m_CameraMode = CameraMode::FirstPerson;
    auto camera = std::shared_ptr<FirstPersonCamera>(new FirstPersonCamera);
    m_pCamera = camera;
    camera->SetViewPort(0.0f, 0.0f, (float)m_ClientWidth, (float)m_ClientHeight);
    camera->LookAt(XMFLOAT3(), XMFLOAT3(0.0f, 0.0f, 1.0f), XMFLOAT3(0.0f, 1.0f, 0.0f));

    // Values that change only when the window is resized
    m_pCamera->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
    m_CBOnResize.proj = XMMatrixTranspose(m_pCamera->GetProjXM());

    // Values that never change
    // Directional light
    m_CBRarely.dirLight[0].ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
    m_CBRarely.dirLight[0].diffuse = XMFLOAT4(0.8f, 0.8f, 0.8f, 1.0f);
    m_CBRarely.dirLight[0].specular = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
    m_CBRarely.dirLight[0].direction = XMFLOAT3(0.0f, -1.0f, 0.0f);
    // Point light
    m_CBRarely.pointLight[0].position = XMFLOAT3(0.0f, 10.0f, 0.0f);
    m_CBRarely.pointLight[0].ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
    m_CBRarely.pointLight[0].diffuse = XMFLOAT4(0.8f, 0.8f, 0.8f, 1.0f);
    m_CBRarely.pointLight[0].specular = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
    m_CBRarely.pointLight[0].att = XMFLOAT3(0.0f, 0.1f, 0.0f);
    m_CBRarely.pointLight[0].range = 25.0f;
    m_CBRarely.numDirLight = 1;
    m_CBRarely.numPointLight = 1;
    m_CBRarely.numSpotLight = 0;
    // Material
    m_CBRarely.material.ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
    m_CBRarely.material.diffuse = XMFLOAT4(0.6f, 0.6f, 0.6f, 1.0f);
    m_CBRarely.material.specular = XMFLOAT4(0.1f, 0.1f, 0.1f, 50.0f);

    // Upload the rarely-modified constant buffers
    D3D11_MAPPED_SUBRESOURCE mappedData;
    HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[2].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
    memcpy_s(mappedData.pData, sizeof(CBChangesOnResize), &m_CBOnResize, sizeof(CBChangesOnResize));
    m_pd3dImmediateContext->Unmap(m_pConstantBuffers[2].Get(), 0);

    HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[3].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
    memcpy_s(mappedData.pData, sizeof(CBChangesRarely), &m_CBRarely, sizeof(CBChangesRarely));
    m_pd3dImmediateContext->Unmap(m_pConstantBuffers[3].Get(), 0);

    // ******************
    // Bind the required resources to the pipeline stages

    // Primitive topology and input layout
    m_pd3dImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    m_pd3dImmediateContext->IASetInputLayout(m_pVertexLayout3D.Get());
    // Bind the 3D shaders by default
    m_pd3dImmediateContext->VSSetShader(m_pVertexShader3D.Get(), nullptr, 0);
    // Pre-bind the constant buffers each stage needs;
    // the per-frame buffer is bound to both the VS and the PS
    m_pd3dImmediateContext->VSSetConstantBuffers(0, 1, m_pConstantBuffers[0].GetAddressOf());
    m_pd3dImmediateContext->VSSetConstantBuffers(1, 1, m_pConstantBuffers[1].GetAddressOf());
    m_pd3dImmediateContext->VSSetConstantBuffers(2, 1, m_pConstantBuffers[2].GetAddressOf());

    m_pd3dImmediateContext->PSSetConstantBuffers(1, 1, m_pConstantBuffers[1].GetAddressOf());
    m_pd3dImmediateContext->PSSetConstantBuffers(3, 1, m_pConstantBuffers[3].GetAddressOf());
    m_pd3dImmediateContext->PSSetShader(m_pPixelShader3D.Get(), nullptr, 0);
    m_pd3dImmediateContext->PSSetSamplers(0, 1, m_pSamplerState.GetAddressOf());

    return true;
}
```
While playing in camera mode the mouse cursor is hidden, so the mouse can be switched to relative mode.
First, the GetSystemMetrics function is used to query the current screen resolution so that the window can be centered when CreateWindow is called. Below is the change to `D3DApp::InitMainWindow`:
```cpp
bool D3DApp::InitMainWindow()
{
    // Unchanged parts omitted...

    int screenWidth = GetSystemMetrics(SM_CXSCREEN);
    int screenHeight = GetSystemMetrics(SM_CYSCREEN);

    // Compute window rectangle dimensions based on requested client area dimensions.
    RECT R = { 0, 0, m_ClientWidth, m_ClientHeight };
    AdjustWindowRect(&R, WS_OVERLAPPEDWINDOW, false);
    int width = R.right - R.left;
    int height = R.bottom - R.top;

    m_hMainWnd = CreateWindow(L"D3DWndClassName", m_MainWndCaption.c_str(), WS_OVERLAPPEDWINDOW,
        (screenWidth - width) / 2, (screenHeight - height) / 2, width, height, 0, 0, m_hAppInst, 0);

    // Unchanged parts omitted...

    return true;
}
```
Then the `GameApp::Init` method switches the mouse to relative mode:
```cpp
bool GameApp::Init()
{
    if (!D3DApp::Init())
        return false;
    if (!InitEffect())
        return false;
    if (!InitResource())
        return false;

    // Initialize the mouse; the keyboard needs no extra setup
    m_pMouse->SetWindow(m_hMainWnd);
    m_pMouse->SetMode(DirectX::Mouse::MODE_RELATIVE);

    return true;
}
```
Finally, we can read the relative mouse displacement and react to it according to the current camera mode and the keyboard/mouse state:
```cpp
void GameApp::UpdateScene(float dt)
{
    // Update mouse events and get the relative offsets
    Mouse::State mouseState = m_pMouse->GetState();
    Mouse::State lastMouseState = m_MouseTracker.GetLastState();

    Keyboard::State keyState = m_pKeyboard->GetState();
    m_KeyboardTracker.Update(keyState);

    // Get the derived camera classes
    auto cam1st = std::dynamic_pointer_cast<FirstPersonCamera>(m_pCamera);
    auto cam3rd = std::dynamic_pointer_cast<ThirdPersonCamera>(m_pCamera);

    if (m_CameraMode == CameraMode::FirstPerson || m_CameraMode == CameraMode::Free)
    {
        // First-person/free camera controls

        // Directional movement
        if (keyState.IsKeyDown(Keyboard::W))
        {
            if (m_CameraMode == CameraMode::FirstPerson)
                cam1st->Walk(dt * 3.0f);
            else
                cam1st->MoveForward(dt * 3.0f);
        }
        if (keyState.IsKeyDown(Keyboard::S))
        {
            if (m_CameraMode == CameraMode::FirstPerson)
                cam1st->Walk(dt * -3.0f);
            else
                cam1st->MoveForward(dt * -3.0f);
        }
        if (keyState.IsKeyDown(Keyboard::A))
            cam1st->Strafe(dt * -3.0f);
        if (keyState.IsKeyDown(Keyboard::D))
            cam1st->Strafe(dt * 3.0f);

        // Clamp the position to the [-8.9f, 8.9f] region
        // and do not allow sinking below the floor
        XMFLOAT3 adjustedPos;
        XMStoreFloat3(&adjustedPos, XMVectorClamp(cam1st->GetPositionXM(),
            XMVectorSet(-8.9f, 0.0f, -8.9f, 0.0f), XMVectorReplicate(8.9f)));
        cam1st->SetPosition(adjustedPos);

        // Move the crate only in first-person mode
        if (m_CameraMode == CameraMode::FirstPerson)
            m_WoodCrate.SetWorldMatrix(XMMatrixTranslation(adjustedPos.x, adjustedPos.y, adjustedPos.z));

        // Rotate the view; scaling by dt avoids a sudden jump from a large initial delta
        cam1st->Pitch(mouseState.y * dt * 1.25f);
        cam1st->RotateY(mouseState.x * dt * 1.25f);
    }
    else if (m_CameraMode == CameraMode::ThirdPerson)
    {
        // Third-person camera controls

        cam3rd->SetTarget(m_WoodCrate.GetPosition());

        // Rotate around the object
        cam3rd->RotateX(mouseState.y * dt * 1.25f);
        cam3rd->RotateY(mouseState.x * dt * 1.25f);
        cam3rd->Approach(-mouseState.scrollWheelValue / 120 * 1.0f);
    }

    // Update the view matrix
    m_pCamera->UpdateViewMatrix();
    XMStoreFloat4(&m_CBFrame.eyePos, m_pCamera->GetPositionXM());
    m_CBFrame.view = XMMatrixTranspose(m_pCamera->GetViewXM());

    // Reset the scroll wheel value
    m_pMouse->ResetScrollWheelValue();

    // Camera mode switching
    if (m_KeyboardTracker.IsKeyPressed(Keyboard::D1) && m_CameraMode != CameraMode::FirstPerson)
    {
        if (!cam1st)
        {
            cam1st.reset(new FirstPersonCamera);
            cam1st->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
            m_pCamera = cam1st;
        }

        cam1st->LookTo(m_WoodCrate.GetPosition(),
            XMFLOAT3(0.0f, 0.0f, 1.0f),
            XMFLOAT3(0.0f, 1.0f, 0.0f));

        m_CameraMode = CameraMode::FirstPerson;
    }
    else if (m_KeyboardTracker.IsKeyPressed(Keyboard::D2) && m_CameraMode != CameraMode::ThirdPerson)
    {
        if (!cam3rd)
        {
            cam3rd.reset(new ThirdPersonCamera);
            cam3rd->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
            m_pCamera = cam3rd;
        }
        XMFLOAT3 target = m_WoodCrate.GetPosition();
        cam3rd->SetTarget(target);
        cam3rd->SetDistance(8.0f);
        cam3rd->SetDistanceMinMax(3.0f, 20.0f);

        m_CameraMode = CameraMode::ThirdPerson;
    }
    else if (m_KeyboardTracker.IsKeyPressed(Keyboard::D3) && m_CameraMode != CameraMode::Free)
    {
        if (!cam1st)
        {
            cam1st.reset(new FirstPersonCamera);
            cam1st->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
            m_pCamera = cam1st;
        }
        // Start from above the crate
        XMFLOAT3 pos = m_WoodCrate.GetPosition();
        XMFLOAT3 to = XMFLOAT3(0.0f, 0.0f, 1.0f);
        XMFLOAT3 up = XMFLOAT3(0.0f, 1.0f, 0.0f);
        pos.y += 3;
        cam1st->LookTo(pos, to, up);

        m_CameraMode = CameraMode::Free;
    }
    // Exit the program by sending a destroy message to the window
    if (keyState.IsKeyDown(Keyboard::Escape))
        SendMessage(MainWnd(), WM_DESTROY, 0, 0);

    D3D11_MAPPED_SUBRESOURCE mappedData;
    HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[1].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
    memcpy_s(mappedData.pData, sizeof(CBChangesEveryFrame), &m_CBFrame, sizeof(CBChangesEveryFrame));
    m_pd3dImmediateContext->Unmap(m_pConstantBuffers[1].Get(), 0);
}
```
The XMVectorClamp call on the camera position keeps X and Z within [-8.9, 8.9] and Y within [0, 8.9], so the first-person camera can neither leave the walled area nor sink below the floor. The third-person camera has no such restriction, because letting it pass through the walls creates a see-through viewing effect.
The `GameApp::DrawScene` method changes very little:
```cpp
void GameApp::DrawScene()
{
    assert(m_pd3dImmediateContext);
    assert(m_pSwapChain);

    m_pd3dImmediateContext->ClearRenderTargetView(m_pRenderTargetView.Get(), reinterpret_cast<const float*>(&Colors::Black));
    m_pd3dImmediateContext->ClearDepthStencilView(m_pDepthStencilView.Get(), D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);

    //
    // Draw the geometry
    //
    m_WoodCrate.Draw(m_pd3dImmediateContext.Get());
    m_Floor.Draw(m_pd3dImmediateContext.Get());
    for (auto& wall : m_Walls)
        wall.Draw(m_pd3dImmediateContext.Get());

    //
    // Direct2D drawing
    //
    // ...

    HR(m_pSwapChain->Present(0, 0));
}
```
Finally, the animated demo (in the original post) shows the three camera modes in action.
DirectX11 With Windows SDK Tutorial Index

Welcome to join the QQ group: 727623616, where you can discuss DX11 and report any problems you run into.