
Implementing Convolution, Pooling, Dropout, Softmax, ReLU, BN, NMS, and IoU

import numpy as np

# Dropout behaves differently at training and test time. During training, each
# unit is dropped with probability p and the surviving outputs are divided by
# 1-p ("inverted dropout"), so the trained model needs no rescaling at test time.
# An alternative scheme skips the division and instead multiplies the outputs
# by 1-p at test time.
def dropout(x, level):
    if level < 0. or level >= 1.:  # level is the drop probability, must lie in [0, 1)
        raise ValueError('Dropout level must be in interval [0, 1).')
    retain_prob = 1. - level
    # np.random.binomial draws a 0/1 mask with the same shape as x. Each unit
    # is one coin flip: n=1 trial per unit, success probability retain_prob,
    # and size sets how many "coins" are flipped.
    sample = np.random.binomial(n=1, p=retain_prob, size=x.shape)
    x *= sample          # multiplying by the 0/1 mask silences the dropped units
    x /= retain_prob     # rescale so the expected activation is unchanged

    return x
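As a quick sanity check (a hypothetical snippet; x_demo and its values are made up), inverted dropout keeps the mean activation roughly unchanged:

x_demo = np.ones(1000)                          # hypothetical input, all ones
out_demo = dropout(x_demo.copy(), level=0.5)    # copy: dropout modifies x in place
print(out_demo.mean())                          # ~1.0: survivors are scaled by 1/(1-p)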
def conv(x, conv_param, w, b):
    # Naive convolution: x is (N, C, H, W), w is (c_out, c_in, kh, kw),
    # conv_param is (stride, pad).
    N, C, H, W = x.shape
    s, p = conv_param
    c_out, c_in, kh, kw = w.shape

    Wn = (W - kw + 2*p) // s + 1
    Hn = (H - kh + 2*p) // s + 1

    out = np.zeros((N, c_out, Hn, Wn))
    n_pad = ((0, 0), (0, 0), (p, p), (p, p))
    x_pad = np.pad(x, n_pad, mode='constant', constant_values=0)
    for i in range(N):
        for oc in range(c_out):
            for ho in range(Hn):
                for wo in range(Wn):
                    patch = x_pad[i, :, ho*s:ho*s+kh, wo*s:wo*s+kw]
                    out[i, oc, ho, wo] = np.sum(patch * w[oc]) + b[oc]
    cache = (x, conv_param, w, b)
    return out, cache
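A minimal shape check for the naive convolution above (a hypothetical snippet; the tensors are random, and conv_param is (stride, pad) as in the function):

x_demo = np.random.randn(2, 3, 5, 5)        # hypothetical batch of two 3-channel 5x5 maps
w_demo = np.random.randn(4, 3, 3, 3)        # four 3x3 filters
b_demo = np.zeros(4)
out_demo, _ = conv(x_demo, (1, 1), w_demo, b_demo)   # stride 1, pad 1
print(out_demo.shape)   # (2, 4, 5, 5): a 3x3 kernel with pad 1 preserves the spatial size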

class max_pooling(object):
    def __init__(self):
        pass

    def forward(self, x, pool_param):
        # pool_param is (kh, kw, stride); x is (N, C, H, W).
        kh, kw, s = pool_param
        N, C, H, W = x.shape
        Hn = 1 + (H - kh) // s
        Wn = 1 + (W - kw) // s
        out = np.zeros((N, C, Hn, Wn))
        for i in range(Hn):
            for j in range(Wn):
                out[..., i, j] = np.max(x[..., i*s:i*s+kh, j*s:j*s+kw], axis=(2, 3))
        cache = (x, out, pool_param)
        return out, cache

    def backward(self, dout, cache):
        x, out, pool_param = cache
        kh, kw, s = pool_param
        dx = np.zeros_like(x)
        N, C, Hn, Wn = dout.shape
        for i in range(Hn):
            for j in range(Wn):
                # Mask of the window positions that achieved the max;
                # only those positions receive gradient.
                mark = x[..., i*s:i*s+kh, j*s:j*s+kw] == out[..., i, j][..., np.newaxis, np.newaxis]
                dx[..., i*s:i*s+kh, j*s:j*s+kw] += mark * dout[..., i, j][..., np.newaxis, np.newaxis]
        return dx
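A quick forward/backward check on a tiny input (a hypothetical snippet; 2x2 windows with stride 2):

pool = max_pooling()
x_demo = np.arange(16, dtype=float).reshape(1, 1, 4, 4)   # hypothetical 4x4 map
out_demo, cache_demo = pool.forward(x_demo, (2, 2, 2))
print(out_demo[0, 0])    # [[ 5.  7.] [13. 15.]]: the max of each 2x2 window
dx_demo = pool.backward(np.ones_like(out_demo), cache_demo)
print(dx_demo[0, 0])     # gradient is routed only to the argmax positions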

class softmax(object):
    def __init__(self):
        pass

    def cal_loss(self, prediction, label):
        # Summed cross-entropy over the batch: the negative log-probability of
        # the true class. label[i] is the class index for sample i.
        self.loss = 0
        n = prediction.shape[0]
        for i in range(n):
            self.loss -= prediction[i, label[i]] - np.log(np.sum(np.exp(prediction[i])))
        return self.loss

    def predict(self, prediction):
        n = prediction.shape[0]
        exp_prediction = np.zeros(prediction.shape)
        self.softmax = np.zeros(prediction.shape)
        for i in range(n):
            # Subtract the row max for numerical stability before exponentiating.
            prediction[i, :] = prediction[i, :] - np.max(prediction[i, :])
            exp_prediction[i] = np.exp(prediction[i])
            self.softmax[i] = exp_prediction[i] / np.sum(exp_prediction[i])
        return self.softmax
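A minimal usage check (a hypothetical snippet; the logits are arbitrary and labels are per-row class indices):

sm = softmax()
logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.0]])          # hypothetical logits
probs = sm.predict(logits.copy())             # copy: predict shifts the rows in place
print(probs.sum(axis=1))                      # [1. 1.]: each row is a distribution
print(sm.cal_loss(logits, np.array([0, 1])))  # summed cross-entropy over the batch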

class Relu(object):
    def __init__(self, shape):
        self.dx = np.zeros(shape)
        self.x = np.zeros(shape)

    def forward(self, x):
        self.x = x
        return np.maximum(0, x)

    def backward(self, dx):
        # Gradient passes through only where the forward input was non-negative.
        dx[self.x < 0] = 0
        return dx
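Usage is straightforward (a hypothetical snippet): the backward pass zeroes the gradient wherever the forward input was negative:

relu = Relu((4,))
print(relu.forward(np.array([-2.0, -0.5, 0.5, 2.0])))  # [0.  0.  0.5 2. ]
print(relu.backward(np.ones(4)))                       # [0. 0. 1. 1.]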
class BN(object):
    def __init__(self, shape):
        # shape is (N, C, H, W); statistics and parameters are per channel.
        self.alpha = np.ones(shape[1])    # learnable scale (gamma)
        self.beta = np.zeros(shape[1])    # learnable shift (beta)
        self.moving_mean = np.zeros(shape[1])
        self.moving_var = np.zeros(shape[1])
        self.epsilon = 0.000001
        self.moving_decay = 0.997
        self.batchsize = shape[0]
        self.training = True

    def forward(self, x):
        self.input_data = x
        self.mean = np.mean(x, axis=(0, 2, 3))
        # Unbiased variance (Bessel's correction) when the batch has more than one sample.
        self.var = self.batchsize / (self.batchsize - 1) * np.var(x, axis=(0, 2, 3)) \
            if self.batchsize > 1 else np.var(x, axis=(0, 2, 3))

        # Initialise the running statistics on the first batch, then track them
        # with an exponential moving average.
        if np.sum(self.moving_mean) == 0 and np.sum(self.moving_var) == 0:
            self.moving_mean = self.mean
            self.moving_var = self.var
        else:
            self.moving_mean = self.moving_decay * self.moving_mean + (1 - self.moving_decay) * self.mean
            self.moving_var = self.moving_decay * self.moving_var + (1 - self.moving_decay) * self.var

        # Reshape per-channel statistics to (1, C, 1, 1) so they broadcast
        # over the (N, C, H, W) input.
        if self.training:
            self.normal_x = (x - self.mean.reshape(1, -1, 1, 1)) / np.sqrt(self.var.reshape(1, -1, 1, 1) + self.epsilon)
        else:
            self.normal_x = (x - self.moving_mean.reshape(1, -1, 1, 1)) / np.sqrt(self.moving_var.reshape(1, -1, 1, 1) + self.epsilon)

        return self.normal_x * self.alpha.reshape(1, -1, 1, 1) + self.beta.reshape(1, -1, 1, 1)

    def gradient(self, eta):
        # eta is dL/dy with shape (N, C, H, W); m elements contribute per channel.
        N, C, H, W = eta.shape
        m = N * H * W
        self.a_gradient = np.sum(eta * self.normal_x, axis=(0, 2, 3))
        self.b_gradient = np.sum(eta, axis=(0, 2, 3))

        normal_x_gradient = eta * self.alpha.reshape(1, -1, 1, 1)
        mean = self.mean.reshape(1, -1, 1, 1)
        var = self.var.reshape(1, -1, 1, 1)
        var_gradient = np.sum(-1.0/2 * normal_x_gradient * (self.input_data - mean) / (var + self.epsilon)**(3.0/2),
                              axis=(0, 2, 3), keepdims=True)
        mean_gradient = np.sum(-1 / np.sqrt(var + self.epsilon) * normal_x_gradient, axis=(0, 2, 3), keepdims=True)

        x_gradient = normal_x_gradient / np.sqrt(var + self.epsilon) \
            + 2 * (self.input_data - mean) * var_gradient / m + mean_gradient / m

        return x_gradient

    def backward(self, lr=0.0001):
        # Plain SGD step on the learnable scale and shift.
        self.alpha -= lr * self.a_gradient
        self.beta -= lr * self.b_gradient
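A quick training-mode sanity check (a hypothetical snippet; with the default alpha=1, beta=0 the output should be roughly standardised per channel):

bn = BN((8, 3, 4, 4))
x_demo = np.random.randn(8, 3, 4, 4) * 5 + 2    # hypothetical shifted, scaled input
y_demo = bn.forward(x_demo)
print(y_demo.mean(axis=(0, 2, 3)))   # per-channel means near 0
print(y_demo.var(axis=(0, 2, 3)))    # near 1 (slightly below, from the unbiased-variance correction)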

def im2col(x, k, s):
    # Unfold each k*k patch (stride s) into one row, so convolution can be
    # computed as a single matrix multiply against the flattened filters.
    # Note: each row flattens the patch across the whole batch, so the matmul
    # form below assumes N=1.
    cols = []
    N, C, H, W = x.shape
    for i in range((H - k) // s + 1):
        for j in range((W - k) // s + 1):
            col = x[:, :, i*s:i*s+k, j*s:j*s+k].reshape([-1])
            cols.append(col)
    return np.array(cols)
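With the loop bounds fixed, im2col turns convolution into one matrix multiply. A hypothetical single-sample check (N=1, stride 1, no padding) against the naive conv above:

x_demo = np.random.randn(1, 3, 5, 5)
w_demo = np.random.randn(4, 3, 3, 3)
b_demo = np.zeros(4)
cols = im2col(x_demo, k=3, s=1)                     # (9, 27): one row per output position
out_mat = cols @ w_demo.reshape(4, -1).T + b_demo   # (9, 4)
out_demo = out_mat.T.reshape(1, 4, 3, 3)
ref, _ = conv(x_demo, (1, 0), w_demo, b_demo)       # stride 1, pad 0
print(np.allclose(out_demo, ref))                   # True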
import torch

def nms(bboxs, scores, threshold):
    # bboxs: (N, 4) boxes in (x1, y1, x2, y2) format; scores: (N,).
    x1 = bboxs[:, 0]
    y1 = bboxs[:, 1]
    x2 = bboxs[:, 2]
    y2 = bboxs[:, 3]

    areas = (x2 - x1) * (y2 - y1)
    _, order = scores.sort(descending=True)
    keep = []

    while order.numel() > 0:
        if order.numel() == 1:
            i = order.item()
            keep.append(i)
            break
        else:
            i = order[0].item()
            keep.append(i)

        # Intersection of the current best box with every remaining box.
        xx1 = x1[order[1:]].clamp(min=x1[i])
        yy1 = y1[order[1:]].clamp(min=y1[i])
        xx2 = x2[order[1:]].clamp(max=x2[i])
        yy2 = y2[order[1:]].clamp(max=y2[i])

        inter = (xx2 - xx1).clamp(min=0) * (yy2 - yy1).clamp(min=0)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only the boxes whose overlap with the best box is below threshold.
        index = (iou < threshold).nonzero().squeeze()
        if index.numel() == 0:
            break
        order = order[index + 1]
    return keep
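A tiny check (a hypothetical snippet with made-up boxes in (x1, y1, x2, y2) format): the two heavily overlapping boxes collapse to the higher-scoring one, while the distant box survives:

boxes = torch.tensor([[ 0.,  0., 10., 10.],
                      [ 1.,  1., 11., 11.],
                      [50., 50., 60., 60.]])   # hypothetical boxes
scores = torch.tensor([0.9, 0.8, 0.7])
print(nms(boxes, scores, threshold=0.5))       # [0, 2]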

def iou(bboxs1, bboxs2):
    # Pairwise IoU between bboxs1 (N, 4) and bboxs2 (M, 4), (x1, y1, x2, y2) format.
    N = bboxs1.size(0)
    M = bboxs2.size(0)
    # Top-left corner of each intersection: element-wise max of the (x1, y1)s.
    lt = torch.max(bboxs1[:, :2].unsqueeze(1).expand(N, M, 2),
                   bboxs2[:, :2].unsqueeze(0).expand(N, M, 2))
    # Bottom-right corner: element-wise min of the (x2, y2)s.
    rb = torch.min(bboxs1[:, 2:].unsqueeze(1).expand(N, M, 2),
                   bboxs2[:, 2:].unsqueeze(0).expand(N, M, 2))

    wh = rb - lt
    wh[wh < 0] = 0  # non-overlapping pairs contribute zero width/height

    inter = wh[..., 0] * wh[..., 1]

    areas1 = ((bboxs1[:, 2] - bboxs1[:, 0]) * (bboxs1[:, 3] - bboxs1[:, 1])).unsqueeze(1).expand(N, M)
    areas2 = ((bboxs2[:, 2] - bboxs2[:, 0]) * (bboxs2[:, 3] - bboxs2[:, 1])).unsqueeze(0).expand(N, M)
    iou = inter / (areas1 + areas2 - inter)

    return iou
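A quick consistency check (a hypothetical snippet): the IoU of a box set with itself is 1 on the diagonal:

boxes = torch.tensor([[0., 0., 10., 10.],
                      [5., 5., 15., 15.]])     # hypothetical boxes
print(iou(boxes, boxes))   # diagonal 1.0; off-diagonal 25 / 175 ≈ 0.143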