“散焦模糊”在摄影上又称“景深”。 在现实的相机中,我们需要做“散焦模糊”的原因是:我们需要一个更大的孔来收集光线增加图片的亮度(而不是“针孔”)。我们称“更大的孔”为“光圈”。 光圈的增大,导致图片出现散焦而模糊。 但是我们可以调节成像的位置,改变图片的清晰度,使图片在当前光圈条件下尽可能清晰。 所以,接下来,我们会引入两个参数:aperture(光圈)和focus_dist(成像位置)。 camera.h:

#ifndef cameraH
#define cameraH

// M_PI: the circle constant pi. <cmath>/<math.h> also defines this macro on
// some platforms, so guard against redefinition.
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#include <cmath>    // tan
#include <cstdlib>  // rand, RAND_MAX
#include "ray.h"

vec3 random_in_unit_disk()
{
/*在z=0平面上产生一个“起点在原点,长度小于1,方向随机”的向量。
为什么是z=0平面,这个和相机的倾斜方向有关。
(相机的倾斜方向为view up (简称vup,一般设置为(0,1,0)))
*/
vec3 p;
do{
//p = 2.0*vec3(drand48(),drand48(),0) - vec3(1,1,0);
p = 2.0 * vec3((rand() % (100) / (float)(100)), (rand() % (100) / (float)(100)), 0) - vec3(1, 1, 0);
} while (dot(p, p) >= 1.0);
return p;
}

class camera
{
vec3 origin;
vec3 lower_left_corner;
vec3 horizontal;
vec3 vertical;
vec3 u, v, w;
float lens_radius;

public:
camera(vec3 lookfrom, vec3 lookat, vec3 vup, float vfov, float aspect, float aperture, float focus\_dist)
{
    lens\_radius = aperture / 2;
    float theta = vfov \* M\_PI / 180;
    float half\_height = tan(theta / 2);
    float half\_width = aspect \* half\_height;
    origin = lookfrom;
    w = unit\_vector(lookfrom - lookat);
    u = unit\_vector(cross(vup, w));
    v = cross(w, u);
    lower\_left\_corner = origin - half\_width \* focus\_dist \* u - half\_height \* focus\_dist \* v - focus\_dist \* w;
    horizontal = 2 \* half\_width \* focus\_dist \* u;
    vertical = 2 \* half\_height \* focus\_dist \* v;
}

ray getray(float s, float t)
{
    vec3 rd = lens\_radius \* random\_in\_unit\_disk();
    vec3 offset = u \* rd.x() + v \* rd.y();
    return ray(origin + offset, lower\_left\_corner + s \* horizontal + t \* vertical - origin - offset);
}

};
#endif

cpp:

#include <iostream>
#include <fstream>   // ofstream
#include <cmath>     // sqrt
#include <cfloat>    // FLT_MAX
#include <cstdlib>   // rand, RAND_MAX
#include <random>    // default_random_engine, uniform_real_distribution
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"

using namespace std;

//获得反射射线
vec3 RandomInUnitsphere()
{
vec3 p;
do{
p = 2.0f * vec3((rand() % 100 / float(100)), (rand() % 100 / float(100)), (rand() % 100 / float(100))) - vec3(1.0f, 1.0f, 1.0f);
} while (dot(p, p) >= 1.0f);

return p;

}

vec3 Color(const ray& r, hitable* world, int depth)
{
//这个“rec”会在sphere::hit ()中带上来被撞击球的材料属性(指向一个材质对象的指针mat_ptr)。
//根据这个指针可以获取材料对象的成员方法scatter()和成员变量albedo(反射衰减向量)
hit_record rec;
if (world->hit(r, 0.001f, FLT_MAX, rec))
{
ray scattered;
vec3 attenuation;
if (depth < 50 && rec.mat_ptr->scatter(r, rec, attenuation, scattered))
{
//获取反射光线向量scattered和反射衰减向量attenuation
return attenuation * Color(scattered, world, depth + 1);
//反射光线的强度需要乘以反射衰减向量(对应坐标相乘作为新的向量)。
//然后反射光线就扮演之前“原始光线”的角色。如果再次撞击到小球,就再次反射,直到不再撞击到任何球为止
}
else
{
return vec3(0.0f, 0.0f, 0.0f);
}
}
else
{
//绘制背景
vec3 unit_direction = unit_vector(r.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);

    //线性混合,t=1时蓝色,t=0时白色,t介于中间时是混合颜色
    //blended\_value = (1-t)\*start\_value + t\*end\_value
    return (1.0f - t) \* vec3(1.0f, 1.0f, 1.0f) + t \* vec3(0.5f, 0.7f, 1.0f);

    //注意这里,原始光线和反射光线最后都会跑到这里来。
    //背景的颜色:原始光线的方向向量的映射
    //漫反射材料和镜面材料的颜色:最后一次反射光线的方向向量的映射 \*  所有反射衰减系数的乘积。
    //漫反射和镜面反射的区别在于,漫反射的每次反射方向是随机的
}

}

//And add some metal spheres
int main()
{
ofstream outfile;
outfile.open(“IMG.ppm”);

int nx = 800;
int ny = 400;
//采样次数
int ns = 100;
outfile << "P3\\n" << nx << " " << ny << "\\n255\\n";

hitable\* list\[5\];
list\[0\] = new sphere(vec3(0.0f, 0.0f, -1.0f), 0.5f, new lambertian(vec3(0.8f, 0.3f, 0.3f)));
list\[1\] = new sphere(vec3(0.0f, -100.5f, -1.0f), 100.0f, new lambertian(vec3(0.8f, 0.8f, 0.0f)));
list\[2\] = new sphere(vec3(1.0f, 0.0f, -1.0f), 0.5f, new metal(vec3(0.8f, 0.6f, 0.2f), 0.3f));
list\[3\] = new sphere(vec3(-1.0f, 0.0f, -1.0f), 0.5f, new dielectric(1.5f));
list\[4\] = new sphere(vec3(-1.0f, 0.0f, -1.0f), 0.5f, new dielectric(1.5f));
hitable\* world = new hitable\_list(list, 5);

vec3 lookform(3, 3, 2);
vec3 lookat(0, 0, -1);
float dist\_to\_focus = (lookform - lookat).length();
float aperture = 2.0f;

camera cam(lookform, lookat, vec3(0, 1, 0), 20, float(nx) / float(ny), aperture, dist\_to\_focus);

//随机数
default\_random\_engine reng;
uniform\_real\_distribution<float> uni\_dist(0.0f, 1.0f);

for (int j = ny - 1; j >= 0; j--)
{
    for (int i = 0; i < nx; i++)
    {
        vec3 col(0.0f, 0.0f, 0.0f);
        //每个区域采样ns次
        for (int s = 0; s < ns; s++)
        {
            float u = float(i + uni\_dist(reng)) / float(nx);
            float v = float(j + uni\_dist(reng)) / float(ny);
            ray r = cam.getray(u, v);
            //vec3 p = r.point\_at\_parameter(2.0);
            //将本区域((u,v)到(u+1,v+1))的颜色值累加
            col += Color(r, world, 0);
        }
        //获得区域的颜色均值
        col /= float(ns);
        //gamma矫正
        col = vec3(sqrt(col\[0\]), sqrt(col\[1\]), sqrt(col\[2\]));
        int ir = int(255.99 \* col\[0\]);
        int ig = int(255.99 \* col\[1\]);
        int ib = int(255.99 \* col\[2\]);
        outfile << ir << " " << ig << " " << ib << "\\n";
    }
}
outfile.close();
return 0;

}

最终效果: 参考书籍:《Ray Tracing in One Weekend》 RTIOW系列项目地址:GitHub RTIOW系列笔记: RTIOW-ch1:Output an image RTIOW-ch2:The vec3 class RTIOW-ch3:Rays, a simple camera, and background RTIOW-ch4:Adding a sphere RTIOW-ch5:Surface normals and multiple objects RTIOW-ch6:Antialiasing RTIOW-ch7:Diffuse Materials RTIOW-ch8:Metal RTIOW-ch9:Dielectrics RTIOW-ch10:Positionable camera RTIOW-ch11:Defocus Blur RTIOW-ch12:Where next