
Common Hand-Coding Interview Questions for Algorithm Engineers (Part 1) — AI Deep Learning Algorithms

What the hand-coding round of an algorithm engineer interview typically asks

Hand-coding: stochastic gradient descent (SGD) for linear regression in NumPy

  SGD updates the parameters with a single sample (or a small mini-batch) at a time — that is what "stochastic" means: one example drawn from the data is used to approximate the gradient over the full dataset when adjusting θ. This introduces some noise, since the computed gradient is not the exact one. For a convex problem, not every iteration moves the loss toward the global optimum, but the overall direction does, and the final result usually lands close to the global optimum.
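  For linear regression with the (half) mean-squared-error loss used below, the per-mini-batch update implemented by mse and diff_mse can be written as

$$L(W) = \frac{1}{2|B|}\sum_{i \in B}\left(x_i^\top W - y_i\right)^2,\qquad W \leftarrow W - \eta \cdot \frac{1}{|B|} X_B^\top\left(X_B W - Y_B\right)$$

where $B$ is the current mini-batch and $\eta$ is the learning rate.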

import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

# Load the data
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split

X, Y = fetch_california_housing(return_X_y=True)
X.shape, Y.shape  # (20640, 8), (20640,)

# Preprocessing: standardize the features (their raw scales differ by orders of
# magnitude, and plain SGD with a small learning rate diverges otherwise),
# then append a bias column of ones
X = (X - X.mean(axis=0)) / X.std(axis=0)
ones = np.ones(shape=(X.shape[0], 1))
X = np.hstack([X, ones])
validate_size = 0.2
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=validate_size, shuffle=True)

# Batch generator
def get_batch(batchsize: int, X: np.ndarray, Y: np.ndarray):
    assert 0 == X.shape[0] % batchsize, f'{X.shape[0]} % {batchsize} != 0'
    batchnum = X.shape[0] // batchsize
    X_new = X.reshape((batchnum, batchsize, X.shape[1]))
    Y_new = Y.reshape((batchnum, batchsize, ))
    for i in range(batchnum):
        yield X_new[i, :, :], Y_new[i, :]

# Loss and its gradient
def mse(X: np.ndarray, Y: np.ndarray, W: np.ndarray):
    return 0.5 * np.mean(np.square(X @ W - Y))

def diff_mse(X: np.ndarray, Y: np.ndarray, W: np.ndarray):
    return X.T @ (X @ W - Y) / X.shape[0]

# Plot helper for the loss curves (called at the end of train, undefined in the original)
def plot_loss(loss_train, loss_validate, validate_every):
    plt.plot(np.arange(len(loss_train)), loss_train, label='train')
    plt.plot(np.arange(len(loss_validate)) * validate_every, loss_validate, label='validate')
    plt.legend()
    plt.show()

# Hyperparameters
lr = 0.001           # learning rate
num_epochs = 1000    # number of training epochs
batch_size = 64      # samples per batch
validate_every = 4   # validate every N epochs

def train(num_epochs: int, batch_size: int, validate_every: int, W0: np.ndarray,
          X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, Y_test: np.ndarray):
    loop = tqdm(range(num_epochs))
    loss_train = []
    loss_validate = []
    W = W0
    # Loop over epochs
    for epoch in loop:
        loss_train_epoch = 0
        # Loop over batches
        for x_batch, y_batch in get_batch(batch_size, X_train, Y_train):
            loss_batch = mse(X=x_batch, Y=y_batch, W=W)
            loss_train_epoch += loss_batch * x_batch.shape[0] / X_train.shape[0]
            grad = diff_mse(X=x_batch, Y=y_batch, W=W)
            W = W - lr * grad
        loss_train.append(loss_train_epoch)
        loop.set_description(f'Epoch: {epoch}, loss: {loss_train_epoch}')
        if 0 == epoch % validate_every:
            loss_validate_epoch = mse(X=X_test, Y=Y_test, W=W)
            loss_validate.append(loss_validate_epoch)
            print('============Validate=============')
            print(f'Epoch: {epoch}, train loss: {loss_train_epoch}, val loss: {loss_validate_epoch}')
            print('=================================')
    plot_loss(np.array(loss_train), np.array(loss_validate), validate_every)
    return W

# Run
W0 = np.random.random(size=(X.shape[1], ))  # initial weights
W = train(num_epochs=num_epochs, batch_size=batch_size, validate_every=validate_every, W0=W0,
          X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test)
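  As an optional sanity check (not part of the original article), the SGD result can be compared with the closed-form least-squares solution; np.linalg.lstsq solves the same problem exactly, so the two test losses should be close once training has converged (run this after the script above, using the weights W it returns):

# Optional: compare SGD against the closed-form least-squares solution
W_star, _, _, _ = np.linalg.lstsq(X_train, Y_train, rcond=None)
print('SGD test loss        :', mse(X_test, Y_test, W))
print('closed-form test loss:', mse(X_test, Y_test, W_star))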

Hand-coding: backpropagation (BP)

  BP is simply backpropagation. The input data passes through a forward pass to produce an output, but because of the (initially arbitrary) weights this output differs from the desired one. Backpropagation then propagates the error backwards and uses gradient descent to update all the weights, so that the next forward pass produces an output closer and closer to the target.
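  The original post refers to formulas in a figure that is not reproduced here; for reference, the backward function below implements the standard chain-rule formulas for a network with sigmoid hidden layers, a linear output layer and squared-error loss:

$$dZ^{[L]} = A^{[L]} - Y,\qquad dW^{[l]} = \frac{1}{m}\, dZ^{[l]} \left(A^{[l-1]}\right)^\top,\qquad db^{[l]} = \frac{1}{m}\sum_{\text{samples}} dZ^{[l]},\qquad dZ^{[l]} = \left(W^{[l+1]}\right)^\top dZ^{[l+1]} \odot \sigma'\!\left(Z^{[l]}\right)$$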

import numpy as np
import matplotlib.pyplot as plt

# Generate weights and biases. layers_dim gives the number of neurons per layer,
# e.g. [2, 3, 1] is a network with 2 inputs, one 3-unit hidden layer and 1 output.
def init_parameters(layers_dim):
    L = len(layers_dim)
    parameters = {}
    for i in range(1, L):
        parameters["w" + str(i)] = np.random.random([layers_dim[i], layers_dim[i-1]])
        parameters["b" + str(i)] = np.zeros((layers_dim[i], 1))
    return parameters

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Derivative of sigmoid
def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))

# Forward pass: takes the input x and all weights/biases stored in the parameters dict.
# Returns caches holding each layer's a and z; a[layers] is the final output.
def forward(x, parameters):
    a = []
    z = []
    caches = {}
    a.append(x)
    z.append(x)
    layers = len(parameters) // 2
    # All hidden layers use sigmoid
    for i in range(1, layers):
        # note: the original used x here, which only works for a single hidden layer
        z_temp = parameters["w" + str(i)].dot(a[i-1]) + parameters["b" + str(i)]
        z.append(z_temp)
        a.append(sigmoid(z_temp))
    # The last layer is linear (no sigmoid)
    z_temp = parameters["w" + str(layers)].dot(a[layers-1]) + parameters["b" + str(layers)]
    z.append(z_temp)
    a.append(z_temp)
    caches["z"] = z
    caches["a"] = a
    return caches, a[layers]

# Backward pass: parameters holds all weights and biases, caches holds each layer's a and z,
# al is the network output from the forward pass, y is the ground truth.
# Returns grades, the gradients of the loss w.r.t. every w and b.
def backward(parameters, caches, al, y):
    layers = len(parameters) // 2
    grades = {}
    m = y.shape[1]
    # The last layer has no activation, so dz_L = al - y (see the formulas above)
    grades["dz" + str(layers)] = al - y
    grades["dw" + str(layers)] = grades["dz" + str(layers)].dot(caches["a"][layers-1].T) / m
    grades["db" + str(layers)] = np.sum(grades["dz" + str(layers)], axis=1, keepdims=True) / m
    # All earlier layers use the sigmoid activation
    for i in reversed(range(1, layers)):
        grades["dz" + str(i)] = parameters["w" + str(i+1)].T.dot(grades["dz" + str(i+1)]) * sigmoid_prime(caches["z"][i])
        grades["dw" + str(i)] = grades["dz" + str(i)].dot(caches["a"][i-1].T) / m
        grades["db" + str(i)] = np.sum(grades["dz" + str(i)], axis=1, keepdims=True) / m
    return grades

# Gradient step: update every weight and bias
def update_grades(parameters, grades, learning_rate):
    layers = len(parameters) // 2
    for i in range(1, layers + 1):
        parameters["w" + str(i)] -= learning_rate * grades["dw" + str(i)]
        parameters["b" + str(i)] -= learning_rate * grades["db" + str(i)]
    return parameters

# Loss
def compute_loss(al, y):
    return np.mean(np.square(al - y))

# Load the data
def load_data():
    """Build a toy regression dataset."""
    x = np.arange(0.0, 1.0, 0.01)
    y = 20 * np.sin(2 * np.pi * x)
    # Visualize the data
    plt.scatter(x, y)
    return x, y

# Run the test
x, y = load_data()
x = x.reshape(1, 100)
y = y.reshape(1, 100)
plt.scatter(x, y)
parameters = init_parameters([1, 25, 1])
al = 0
for i in range(4000):
    caches, al = forward(x, parameters)
    grades = backward(parameters, caches, al, y)
    parameters = update_grades(parameters, grades, learning_rate=0.3)
    if i % 100 == 0:
        print(compute_loss(al, y))
plt.scatter(x, al)
plt.show()

Hand-coding: single-head attention (ScaledDotProductAttention)

    The inputs are a query and a set of key-value pairs. The attention mechanism first computes the compatibility of the query with each key; each compatibility score becomes the weight of the corresponding value, and the weighted sum of the values is the output.
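    In matrix form this is the standard scaled dot-product attention, which the class below implements:

$$\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^\top}{\sqrt{d_k}}\right)V$$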

import numpy as np
import torch
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, scale):
        super().__init__()
        self.scale = scale
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        u = torch.bmm(q, k.transpose(1, 2))  # 1. Matmul
        u = u / self.scale                   # 2. Scale
        if mask is not None:
            u = u.masked_fill(mask, -np.inf) # 3. Mask
        attn = self.softmax(u)               # 4. Softmax
        output = torch.bmm(attn, v)          # 5. Output
        return attn, output


if __name__ == "__main__":
    batch = 2  # batch size (not defined in the original snippet)
    n_q, n_k, n_v = 2, 4, 4
    d_q, d_k, d_v = 128, 128, 64

    q = torch.randn(batch, n_q, d_q)
    k = torch.randn(batch, n_k, d_k)
    v = torch.randn(batch, n_v, d_v)
    mask = torch.zeros(batch, n_q, n_k).bool()

    attention = ScaledDotProductAttention(scale=np.power(d_k, 0.5))
    attn, output = attention(q, k, v, mask=mask)

    print(attn)
    print(output)

Hand-coding: multi-head attention (MultiHeadAttention)
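  Multi-head attention runs several scaled dot-product attentions in parallel on learned projections of Q, K and V and concatenates the results before a final linear layer — the standard formulation, which the class below follows:

$$\mathrm{head}_i = \mathrm{Attention}\!\left(QW_i^Q,\, KW_i^K,\, VW_i^V\right),\qquad \mathrm{MultiHead}(Q, K, V) = \mathrm{Concat}(\mathrm{head}_1, \dots, \mathrm{head}_h)\, W^O$$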

# Reuses torch, nn, np and ScaledDotProductAttention from the previous section
class MultiHeadAttention(nn.Module):
    """ Multi-Head Attention """

    def __init__(self, n_head, d_k_, d_v_, d_k, d_v, d_o):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v

        self.fc_q = nn.Linear(d_k_, n_head * d_k)
        self.fc_k = nn.Linear(d_k_, n_head * d_k)
        self.fc_v = nn.Linear(d_v_, n_head * d_v)

        self.attention = ScaledDotProductAttention(scale=np.power(d_k, 0.5))

        self.fc_o = nn.Linear(n_head * d_v, d_o)

    def forward(self, q, k, v, mask=None):
        n_head, d_q, d_k, d_v = self.n_head, self.d_k, self.d_k, self.d_v

        batch, n_q, d_q_ = q.size()
        batch, n_k, d_k_ = k.size()
        batch, n_v, d_v_ = v.size()

        q = self.fc_q(q)  # 1. Project from single head to n_head heads
        k = self.fc_k(k)
        v = self.fc_v(v)
        q = q.view(batch, n_q, n_head, d_q).permute(2, 0, 1, 3).contiguous().view(-1, n_q, d_q)
        k = k.view(batch, n_k, n_head, d_k).permute(2, 0, 1, 3).contiguous().view(-1, n_k, d_k)
        v = v.view(batch, n_v, n_head, d_v).permute(2, 0, 1, 3).contiguous().view(-1, n_v, d_v)

        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)
        attn, output = self.attention(q, k, v, mask=mask)  # 2. Run single-head attention per head

        output = output.view(n_head, batch, n_q, d_v).permute(1, 2, 0, 3).contiguous().view(batch, n_q, -1)  # 3. Concat
        output = self.fc_o(output)  # 4. Final linear projection

        return attn, output


if __name__ == "__main__":
    batch = 2  # batch size (not defined in the original snippet)
    n_q, n_k, n_v = 2, 4, 4
    d_q_, d_k_, d_v_ = 128, 128, 64

    q = torch.randn(batch, n_q, d_q_)
    k = torch.randn(batch, n_k, d_k_)
    v = torch.randn(batch, n_v, d_v_)
    mask = torch.zeros(batch, n_q, n_k).bool()

    mha = MultiHeadAttention(n_head=8, d_k_=128, d_v_=64, d_k=256, d_v=128, d_o=128)
    attn, output = mha(q, k, v, mask=mask)

    print(attn.size())
    print(output.size())

Hand-coding: self-attention (SelfAttention)

  Self-Attention is similar to Attention — both are attention mechanisms. The difference is that Attention relates a source to a target, where the input source and the output target are different, e.g. English-to-Chinese translation with English in and Chinese out. Self-Attention relates the source to itself: it is attention among the elements of the source (or among the elements of the target), i.e. the special case Target = Source.
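  Concretely, the class below derives the query, key and value from the same input X through three learned projection matrices and then reuses the multi-head attention defined above:

$$Q = XW^Q,\qquad K = XW^K,\qquad V = XW^V,\qquad \mathrm{SelfAttention}(X) = \mathrm{MultiHead}(Q, K, V)$$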

# Reuses torch, nn, np and MultiHeadAttention from the previous section
class SelfAttention(nn.Module):
    """ Self-Attention """

    def __init__(self, n_head, d_k, d_v, d_x, d_o):
        super().__init__()  # missing in the original snippet
        self.wq = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wk = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wv = nn.Parameter(torch.Tensor(d_x, d_v))

        self.mha = MultiHeadAttention(n_head=n_head, d_k_=d_k, d_v_=d_v, d_k=d_k, d_v=d_v, d_o=d_o)

        self.init_parameters()

    def init_parameters(self):
        for param in self.parameters():
            stdv = 1. / np.power(param.size(-1), 0.5)
            param.data.uniform_(-stdv, stdv)

    def forward(self, x, mask=None):
        q = torch.matmul(x, self.wq)
        k = torch.matmul(x, self.wk)
        v = torch.matmul(x, self.wv)

        attn, output = self.mha(q, k, v, mask=mask)

        return attn, output


if __name__ == "__main__":
    batch = 2  # batch size (not defined in the original snippet)
    n_x = 4
    d_x = 80

    x = torch.randn(batch, n_x, d_x)
    mask = torch.zeros(batch, n_x, n_x).bool()

    selfattn = SelfAttention(n_head=8, d_k=128, d_v=64, d_x=80, d_o=80)
    attn, output = selfattn(x, mask=mask)

    print(attn.size())
    print(output.size())

Hand-coding: the beam search algorithm

  In NLP translation and dialogue tasks, a search algorithm called beam search is often used at the sentence-decoding stage. It comes up in big-company interviews from time to time, and you may even be asked to implement it by hand. Here we start from the principle of beam search and end with a hand-written implementation.

  • Idea: beam search widens the search space compared with greedy search. Greedy search keeps only the current best (top-1) result at each step, while beam search keeps the top-k best candidates; a small worked illustration of one expansion step is sketched below.
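  A minimal sketch of one expansion step (the toy numbers are made up for illustration): with beam_size = 2 and a 3-token vocabulary, each of the 2 kept prefixes is expanded by every token, the 2×3 accumulated log-probabilities are flattened, and the top-2 entries identify both the source beam (index // vocab_size) and the new token (index % vocab_size) — exactly the trick used in the full implementation below.

import torch

log_beam_prob = torch.tensor([-0.5, -1.0])         # scores of the 2 kept prefixes
step_logprob = torch.tensor([[-0.1, -2.0, -3.0],   # next-token log-probs for beam 0
                             [-0.2, -0.3, -4.0]])  # next-token log-probs for beam 1
total = (log_beam_prob.unsqueeze(-1) + step_logprob).view(-1)  # shape (2*3,)
topv, topi = total.topk(2)
beam_id, token_id = topi // 3, topi % 3
print(topv, beam_id, token_id)  # keeps (beam 0, token 0) and (beam 1, token 0)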
import torch
import torch.nn.functional as F

def beam_search(LM_prob, beam_size=3):
    batch, seqlen, vocab_size = LM_prob.shape
    # Work in log space
    log_LM_prob = LM_prob.log()
    # Step 0: keep the beam_size most probable tokens at position 0;
    # log_beam_prob and indices both have shape (batch, beam)
    log_beam_prob, indices = log_LM_prob[:, 0, :].topk(beam_size, sorted=True)
    indices = indices.unsqueeze(-1)
    # Beam search over the remaining positions
    for i in range(1, seqlen):
        # log_beam_prob: (batch, beam, vocab_size), score of every possible extension of every beam
        log_beam_prob = log_beam_prob.unsqueeze(-1) + log_LM_prob[:, i, :].unsqueeze(1).repeat(1, beam_size, 1)
        # Keep the beam_size highest-scoring extensions at this step
        log_beam_prob, index = log_beam_prob.view(batch, -1).topk(beam_size, sorted=True)
        # beam_id: which previous beam each new beam comes from; index: the actual token id
        # beam_id, index: (batch, beam)
        beam_id = index // vocab_size
        index = index % vocab_size
        mid = []
        # For every sample in the batch, gather the selected beams and append the new token ids
        # (collected in a list and stacked to avoid dtype issues with torch.cat on an empty tensor)
        for j, bid, idx in zip(range(batch), beam_id, index):
            x = torch.cat([indices[j][bid], idx.unsqueeze(-1)], -1)
            mid.append(x)
        indices = torch.stack(mid, 0)
    return indices, log_beam_prob

if __name__ == '__main__':
    # Fake language-model output LM_prob of shape (batch, seqlen, vocab_size)
    LM_prob = F.softmax(torch.randn([32, 20, 1000]), dim=-1)
    # Returns every candidate, shape (batch, beam_size, seqlen), plus each candidate's log-prob
    indices, log_prob = beam_search(LM_prob, beam_size=3)
    print(indices)

Hand-coding: the k-means algorithm
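  k-means alternates between assigning every point to its nearest center and moving each center to the mean of the points assigned to it, which (locally) minimizes the within-cluster sum of squares:

$$J(\mu_1, \dots, \mu_k) = \sum_{i=1}^{n} \min_{j \in \{1, \dots, k\}} \lVert x_i - \mu_j \rVert^2$$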

import numpy as np

def kmeans(data, k, thresh=1, max_iterations=100):
    # Randomly pick k samples as the initial centers
    centers = data[np.random.choice(data.shape[0], k, replace=False)]

    for _ in range(max_iterations):
        # Distance from every sample to every center
        distances = np.linalg.norm(data[:, None] - centers, axis=2)
        # Assign each sample to its nearest center
        labels = np.argmin(distances, axis=1)
        # Move each center to the mean of its cluster
        # (for simplicity, empty clusters are not handled here)
        new_centers = np.array([data[labels == i].mean(axis=0) for i in range(k)])
        # Check for convergence; several stopping criteria are possible
        # Criterion 1: the centers no longer change
        if np.all(centers == new_centers):
            break
        # Criterion 2: the centers move less than some threshold
        # center_change = np.linalg.norm(new_centers - centers)
        # if center_change < thresh:
        #     break
        centers = new_centers

    return labels, centers

# Random data as example input: 100 samples with 2 features each
data = np.random.rand(100, 2)
# Run the hand-written k-means
k = 3  # number of clusters
labels, centers = kmeans(data, k)
# Print the cluster labels and the cluster centers
print("cluster labels:", labels)
print("cluster centers:", centers)
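  As an optional cross-check (not part of the original article), the result can be compared with scikit-learn's KMeans on the same data; the label numbering may be permuted, but the centers should roughly agree:

from sklearn.cluster import KMeans

ref = KMeans(n_clusters=k, n_init=10).fit(data)
print("sklearn centers:", ref.cluster_centers_)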

Hand-coding: the Layer Normalization algorithm
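  Layer normalization normalizes each sample over its own feature dimensions (here the trailing normalized_shape dimensions) and then applies an optional learned affine transform:

$$y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} \cdot \gamma + \beta$$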

import torch
from torch import nn

class LN(nn.Module):
    # Initialization
    def __init__(self, normalized_shape,           # which trailing dimensions to normalize over
                 eps: float = 1e-5,                # keeps the denominator away from 0
                 elementwise_affine: bool = True): # whether to learn a scale and a shift
        super(LN, self).__init__()
        # Dimensions over which LN is computed; a torch.Size can be passed directly
        self.normalized_shape = normalized_shape  # [c, w*h]
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        # Learnable scale and shift
        if self.elementwise_affine:
            self.gain = nn.Parameter(torch.ones(normalized_shape))  # [c, w*h]
            self.bias = nn.Parameter(torch.zeros(normalized_shape)) # [c, w*h]

    # Forward pass
    def forward(self, x: torch.Tensor):  # [b, c, w*h]
        # The normalized dimensions must match the trailing dimensions of the input
        assert self.normalized_shape == x.shape[-len(self.normalized_shape):]  # [-2:]
        # Indices of the dimensions to normalize over
        dims = [-(i + 1) for i in range(len(self.normalized_shape))]  # [-1, -2], i.e. [c, w*h]
        # Mean and variance over those dimensions
        mean = x.mean(dim=dims, keepdim=True)            # [b, 1, 1]
        mean_x2 = (x ** 2).mean(dim=dims, keepdim=True)  # [b, 1, 1]
        var = mean_x2 - mean ** 2                        # [b, 1, 1]
        x_norm = (x - mean) / torch.sqrt(var + self.eps) # [b, c, w*h]
        # Affine transform
        if self.elementwise_affine:
            x_norm = self.gain * x_norm + self.bias      # [b, c, w*h]
        return x_norm

# ------------------------------- #
# Quick check
# ------------------------------- #
if __name__ == '__main__':
    x = torch.linspace(0, 23, 24, dtype=torch.float32)  # build an input
    x = x.reshape([2, 3, 2 * 2])  # [b, c, w*h]
    # Instantiate
    ln = LN(x.shape[1:])
    # Forward pass
    x = ln(x)
    print(x.shape)

Hand-coding: the Batch Normalization algorithm
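  Batch normalization normalizes each feature over the batch dimension during training and keeps exponential moving averages of the batch statistics for use at inference time (the PyTorch convention, which the class below follows):

$$\hat{x} = \frac{x - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}},\qquad y = \gamma\,\hat{x} + \beta,\qquad \mu_{\mathrm{run}} \leftarrow (1 - m)\,\mu_{\mathrm{run}} + m\,\mu_B$$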

import numpy as np

class MyBN:
    def __init__(self, momentum=0.01, eps=1e-5, feat_dim=2):
        """
        Initialization
        :param momentum: momentum for the running estimates of the batch mean and variance
        :param eps: keeps the denominator away from 0
        :param feat_dim: feature dimension
        """
        # Running estimates of the mean and variance
        self._running_mean = np.zeros(shape=(feat_dim, ))
        self._running_var = np.ones(shape=(feat_dim, ))
        # Momentum used when updating self._running_xxx
        self._momentum = momentum
        # Prevents division by zero
        self._eps = eps
        # The beta and gamma to be learned, initialized as in the PyTorch docs
        self._beta = np.zeros(shape=(feat_dim, ))
        self._gamma = np.ones(shape=(feat_dim, ))
        # Training / inference switch (missing in the original snippet)
        self.training = True

    def batch_norm(self, x):
        """
        BN forward pass
        :param x: input of shape (batch, feat_dim)
        :return: BN output
        """
        if self.training:
            x_mean = x.mean(axis=0)
            x_var = x.var(axis=0)
            # Update the running statistics (PyTorch convention:
            # running = (1 - momentum) * running + momentum * batch statistic)
            self._running_mean = (1 - self._momentum) * self._running_mean + self._momentum * x_mean
            self._running_var = (1 - self._momentum) * self._running_var + self._momentum * x_var
            # The BN formula from the paper
            x_hat = (x - x_mean) / np.sqrt(x_var + self._eps)
        else:
            x_hat = (x - self._running_mean) / np.sqrt(self._running_var + self._eps)
        return self._gamma * x_hat + self._beta
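  A minimal usage sketch (illustrative only, with random data): normalize a few batches in training mode so the running statistics are updated, then switch to inference mode so those running statistics are used instead of the per-batch ones.

np.random.seed(0)
bn = MyBN(momentum=0.1, eps=1e-5, feat_dim=4)
for _ in range(100):                      # training: running stats get updated
    batch = np.random.randn(32, 4) * 2.0 + 1.0
    out = bn.batch_norm(batch)
bn.training = False                       # inference: running stats are used
print(bn._running_mean)                   # approaches the true mean (about 1.0 per feature)
print(bn.batch_norm(np.random.randn(8, 4)).shape)  # (8, 4)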

Hand-coding: a 2D convolution

import numpy as np

def conv2d(img, in_channels, out_channels, kernels, bias, stride=1, padding=0):
    """Naive 2D convolution with explicit loops.
    img:     (N, C, H, W)
    kernels: (out_channels, in_channels, kh, kw)
    bias:    (out_channels,)
    """
    N, C, H, W = img.shape
    kh, kw = kernels.shape[-2:]
    p = padding
    assert C == in_channels, "kernels' input channels do not match with img"
    if p:
        img = np.pad(img, ((0, 0), (0, 0), (p, p), (p, p)), 'constant')  # pad only the spatial axes

    out_h = (H + 2 * padding - kh) // stride + 1
    out_w = (W + 2 * padding - kw) // stride + 1
    outputs = np.zeros([N, out_channels, out_h, out_w])

    for n in range(N):
        for out in range(out_channels):
            for i in range(in_channels):
                for h in range(out_h):
                    for w in range(out_w):
                        for x in range(kh):
                            for y in range(kw):
                                outputs[n][out][h][w] += img[n][i][h * stride + x][w * stride + y] * kernels[out][i][x][y]
            # add the bias once per output channel
            outputs[n][out] += bias[out]
    return outputs
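  As an optional correctness check (not part of the original article), the loop implementation can be compared with torch.nn.functional.conv2d on random inputs; with the kernel shape (out_channels, in_channels, kh, kw) and bias shape (out_channels,) assumed above, the two results should match to floating-point precision:

import torch
import torch.nn.functional as F

img = np.random.randn(2, 3, 8, 8)
kernels = np.random.randn(4, 3, 3, 3)
bias_v = np.random.randn(4)
ours = conv2d(img, in_channels=3, out_channels=4, kernels=kernels, bias=bias_v, stride=1, padding=1)
ref = F.conv2d(torch.from_numpy(img), torch.from_numpy(kernels),
               bias=torch.from_numpy(bias_v), stride=1, padding=1).numpy()
print(np.allclose(ours, ref))  # expected: True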

-------------THE END-------------

Take a break~

This article originally appeared on the WeChat public account AIGC小白入门记, among others.
Article links: https://mp.weixin.qq.com/s/2blXYBuKlKrdLOu6VzcqvQ and https://mp.weixin.qq.com/s/8mKp63Qw24q5v1biA7i9uQ
Shared for learning purposes only; in case of infringement, please contact us and it will be removed.
