As the parameter counts of large language models keep growing, pre-training them becomes ever harder and full-parameter fine-tuning becomes increasingly impractical, so how to fine-tune large models on consumer-grade GPUs has become a popular research direction.
The LoRA paper hypothesizes that the change in the weights during adaptation has a low "intrinsic rank". This lets us train some of the dense layers in a neural network indirectly, by optimizing rank-decomposition matrices of those layers' change during adaptation while keeping the pre-trained weights frozen. Put simply, LoRA freezes the pre-trained model weights and injects trainable rank-decomposition matrices into each layer of the Transformer architecture, greatly reducing the number of trainable parameters for downstream tasks.
It also avoids the drawback of adapters, which insert a few extra layers into the network and train only those: that approach increases the network depth and therefore the model's inference latency, whereas the LoRA update can later be merged back into the original weights.
The original model computes h = W0·x. With LoRA the effective weight becomes W0 + ΔW = W0 + BA, i.e. h = W0·x + BA·x = W0·x + B(A·x), where B ∈ R^(d×r), A ∈ R^(r×k), and the rank r ≪ min(d, k).
Since only A and B are trained, the number of trainable parameters drops dramatically, from d×k to r×(d+k). ΔW amplifies certain feature directions that are already present in W; when fine-tuning on a downstream task, the features relevant to that task get amplified, which is also why low-rank fine-tuning sometimes works even better than full fine-tuning (some useless noise is filtered out).
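To make the parameter savings concrete, here is a minimal sketch (the sizes d = k = 4096 and r = 8 are illustrative assumptions, not values from the text) that checks that W0·x + B(A·x) equals (W0 + BA)·x and compares the two parameter counts:

import torch

d, k, r = 4096, 4096, 8                      # illustrative sizes only
W0 = torch.randn(d, k, dtype=torch.float64)  # frozen pre-trained weight
B = torch.randn(d, r, dtype=torch.float64)   # low-rank factors: B is d x r, A is r x k
A = torch.randn(r, k, dtype=torch.float64)
x = torch.randn(k, dtype=torch.float64)

h_lora = W0 @ x + B @ (A @ x)                # LoRA forward: frozen path + low-rank path
h_merged = (W0 + B @ A) @ x                  # the same computation with the update merged
assert torch.allclose(h_lora, h_merged)

print("full fine-tuning parameters:", d * k)        # 16,777,216
print("LoRA trainable parameters:  ", r * (d + k))  #     65,536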
A and B are generally initialized so that one of them is zero and the other uses kaiming_uniform (a scaled uniform distribution); that way ΔW = BA is exactly zero at the start of training and does not disturb the mapping of the original model.
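A tiny sketch of that initialization scheme (the shapes are made up for illustration):

import math
import torch
import torch.nn as nn

d, k, r = 64, 32, 4                       # illustrative shapes only
lora_A = nn.Parameter(torch.empty(r, k))  # one factor gets the kaiming_uniform init...
lora_B = nn.Parameter(torch.zeros(d, r))  # ...and the other starts at zero
nn.init.kaiming_uniform_(lora_A, a=math.sqrt(5))

delta_W = lora_B @ lora_A                 # ΔW = BA
assert torch.count_nonzero(delta_W) == 0  # the update is exactly zero at step 0,
                                          # so the pre-trained mapping is untouched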
The core LoRA implementation is fairly simple. It consists of a LoRALayer class plus Embedding, Linear, MergedLinear, and ConvLoRA classes; these just inject LoRA into different layer types, and the overall structure is the same everywhere, basically three steps: parameter initialization, train (merge/unmerge), and forward. The role of each class is described below:
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


# LoRALayer is basically a configuration holder; there is nothing special in it by itself.
class LoRALayer():
    # A LoRA layer can be attached to any layer that has trainable parameters,
    # but in most cases it is only applied to the attention projections, i.e. q and v.
    def __init__(
        self,
        r: int,               # rank of the decomposition
        lora_alpha: int,      # numerator of the scaling factor alpha / r
        lora_dropout: float,  # dropout probability for the LoRA branch
        merge_weights: bool,  # whether to merge the LoRA update into the pre-trained weights; False disables merging
    ):
        self.r = r
        self.lora_alpha = lora_alpha
        # Optional dropout: randomly drops units to reduce overfitting
        if lora_dropout > 0.:
            self.lora_dropout = nn.Dropout(p=lora_dropout)
        else:
            # identity lambda: returns its input x unchanged
            self.lora_dropout = lambda x: x
        # Mark the weight as unmerged
        self.merged = False
        self.merge_weights = merge_weights
So LoRALayer is really just a configuration class. The parameter worth noting is the rank r: the more specialized the target domain, the larger r usually needs to be, typically one of [4, 8, 16, 32, 64], at the cost of higher GPU memory usage.
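To get a rough feel for that trade-off, the short sketch below counts the extra trainable parameters each choice of r adds for a single projection matrix; the 4096×4096 size is an assumption for illustration, not a number from the text:

d = k = 4096                           # assumed size of one attention projection (q or v)
for r in [4, 8, 16, 32, 64]:
    lora_params = r * (d + k)          # parameters in A (r x k) plus B (d x r)
    pct = 100 * lora_params / (d * k)  # relative to the frozen weight matrix itself
    print(f"r={r:<2d}  trainable={lora_params:>9,d}  ({pct:.3f}% of the frozen matrix)")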
class Embedding(nn.Embedding, LoRALayer):
    # LoRA implemented in an embedding layer
    def __init__(
        self,
        num_embeddings: int,         # vocabulary size (number of embedding rows)
        embedding_dim: int,          # embedding dimension
        r: int = 0,                  # rank
        lora_alpha: int = 1,         # alpha
        merge_weights: bool = True,  # whether to merge at eval time
        **kwargs
    ):
        # initialize both parent classes
        nn.Embedding.__init__(self, num_embeddings, embedding_dim, **kwargs)
        LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=0,
                           merge_weights=merge_weights)
        # Actual trainable parameters
        if r > 0:
            # These are the A and B matrices. nn.Parameter registers the tensors as
            # learnable parameters; new_zeros creates all-zero tensors with the same
            # device and dtype as self.weight, so the new parameters stay consistent
            # with the pre-trained weight and avoid device/dtype mismatches.
            self.lora_A = nn.Parameter(self.weight.new_zeros((r, num_embeddings)))
            self.lora_B = nn.Parameter(self.weight.new_zeros((embedding_dim, r)))
            self.scaling = self.lora_alpha / self.r  # alpha / r
            # Freezing the pre-trained weight matrix
            self.weight.requires_grad = False
        self.reset_parameters()  # reset the parameters

    def reset_parameters(self):
        nn.Embedding.reset_parameters(self)
        if hasattr(self, 'lora_A'):
            # initialize A to zero and B from a normal distribution; the two roles
            # could be swapped, what matters is that the product BA starts at zero
            nn.init.zeros_(self.lora_A)
            nn.init.normal_(self.lora_B)

    def train(self, mode: bool = True):
        # mode defaults to True; in effect the weights stay unmerged while training
        # and get merged at eval time (train(False))
        nn.Embedding.train(self, mode)
        if mode:
            if self.merge_weights and self.merged:
                # Make sure that the weights are not merged
                if self.r > 0:
                    self.weight.data -= (self.lora_B @ self.lora_A).transpose(0, 1) * self.scaling
                self.merged = False
        else:
            if self.merge_weights and not self.merged:
                # Merge the weights and mark it:
                # B @ A is transposed, scaled, and added into the original weight
                if self.r > 0:
                    self.weight.data += (self.lora_B @ self.lora_A).transpose(0, 1) * self.scaling
                self.merged = True

    def forward(self, x: torch.Tensor):
        # If LoRA is active (r > 0) and the weights are not merged, add the low-rank
        # branch on top of the base embedding; otherwise the (possibly merged)
        # base embedding alone is enough.
        if self.r > 0 and not self.merged:
            result = nn.Embedding.forward(self, x)
            # look x up in A, treated as an extra (num_embeddings x r) embedding table
            after_A = F.embedding(
                x, self.lora_A.transpose(0, 1), self.padding_idx, self.max_norm,
                self.norm_type, self.scale_grad_by_freq, self.sparse
            )
            # project the r-dimensional lookup up through B and scale by alpha / r
            result += (after_A @ self.lora_B.transpose(0, 1)) * self.scaling
            return result
        else:
            return nn.Embedding.forward(self, x)
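A minimal usage sketch for the Embedding class above (vocabulary size, dimension and rank are made-up illustration values; the imports and the class definition above are assumed to be in scope): right after construction the LoRA branch contributes nothing because lora_A is zero, and switching to eval() merges B @ A into the frozen table via train(False).

emb = Embedding(num_embeddings=1000, embedding_dim=64, r=8, lora_alpha=16)
ids = torch.randint(0, 1000, (2, 5))

base = nn.Embedding.forward(emb, ids)  # what the frozen table alone returns
out = emb(ids)                         # the LoRA branch is exactly zero right after init
assert torch.allclose(out, base)

emb.eval()                             # train(False) merges B @ A into emb.weight
assert emb.merged
assert torch.allclose(emb(ids), out)   # merging does not change the outputs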
# Because of the fan_in_fan_out parameter (it indicates whether the layer being
# replaced stores its weight as (fan_in, fan_out), i.e. transposed), this class
# has an extra helper T() compared with the Embedding layer above.
class Linear(nn.Linear, LoRALayer):
    # LoRA implemented in a dense (fully connected) layer; the logic mirrors the Embedding class
    def __init__(
        self,
        in_features: int,
        out_features: int,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        merge_weights: bool = True,
        **kwargs
    ):
        nn.Linear.__init__(self, in_features, out_features, **kwargs)
        LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout,
                           merge_weights=merge_weights)

        self.fan_in_fan_out = fan_in_fan_out
        # Actual trainable parameters
        if r > 0:
            self.lora_A = nn.Parameter(self.weight.new_zeros((r, in_features)))
            self.lora_B = nn.Parameter(self.weight.new_zeros((out_features, r)))
            self.scaling = self.lora_alpha / self.r
            # Freezing the pre-trained weight matrix
            self.weight.requires_grad = False
        self.reset_parameters()
        if fan_in_fan_out:
            self.weight.data = self.weight.data.transpose(0, 1)

    def reset_parameters(self):
        nn.Linear.reset_parameters(self)
        if hasattr(self, 'lora_A'):
            # initialize A the same way as the default for nn.Linear and B to zero
            # (kaiming_uniform instead of the Gaussian described in the paper,
            # which should not affect performance)
            nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
            nn.init.zeros_(self.lora_B)

    def train(self, mode: bool = True):
        def T(w):
            return w.transpose(0, 1) if self.fan_in_fan_out else w
        nn.Linear.train(self, mode)
        if mode:
            if self.merge_weights and self.merged:
                # Make sure that the weights are not merged
                if self.r > 0:
                    self.weight.data -= T(self.lora_B @ self.lora_A) * self.scaling
                self.merged = False
        else:
            if self.merge_weights and not self.merged:
                # Merge the weights and mark it
                if self.r > 0:
                    self.weight.data += T(self.lora_B @ self.lora_A) * self.scaling
                self.merged = True

    def forward(self, x: torch.Tensor):
        def T(w):
            return w.transpose(0, 1) if self.fan_in_fan_out else w
        if self.r > 0 and not self.merged:
            # frozen base projection
            result = F.linear(x, T(self.weight), bias=self.bias)
            # LoRA branch: dropout -> A -> B, scaled by alpha / r
            result += (self.lora_dropout(x) @ self.lora_A.transpose(0, 1) @ self.lora_B.transpose(0, 1)) * self.scaling
            return result
        else:
            return F.linear(x, T(self.weight), bias=self.bias)
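Finally, a hedged end-to-end sketch with the Linear class above (the toy module, its names and sizes are invented for illustration; the imports and classes above are assumed to be in scope): LoRA is applied to the q and v projections, only the lora_A / lora_B factors are left trainable, and calling eval() merges the update for inference. The reference loralib implementation ships a helper, mark_only_lora_as_trainable, for the freezing step; here the same effect is achieved by hand.

class ToyAttention(nn.Module):
    # toy block: LoRA is injected into the q and v projections only
    def __init__(self, dim=256, r=8):
        super().__init__()
        self.q_proj = Linear(dim, dim, r=r, lora_alpha=16)  # LoRA Linear defined above
        self.k_proj = nn.Linear(dim, dim)                   # plain linear, no LoRA
        self.v_proj = Linear(dim, dim, r=r, lora_alpha=16)
        self.o_proj = nn.Linear(dim, dim)

    def forward(self, x):
        # not real attention, just enough to exercise the projections
        return self.o_proj(self.q_proj(x) + self.k_proj(x) + self.v_proj(x))

model = ToyAttention()

# keep only the LoRA factors trainable
for name, p in model.named_parameters():
    p.requires_grad = 'lora_' in name

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable parameters: {trainable:,} / {total:,}")

x = torch.randn(4, 256)
before = model(x)
model.eval()                           # train(False) merges B @ A * alpha/r into the weights
assert model.q_proj.merged and model.v_proj.merged
assert torch.allclose(model(x), before)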
Paper: https://arxiv.org/abs/2106.09685
Code: https://github.com/huggingface/peft/tree/main/src/peft/tuners/lora