
Notes on implementing upsampling in Caffe (and the CARAFE upsampling algorithm)

Linear interpolation, deconvolution, dilated (atrous) convolution, upsampling, and a whole pile of similar-sounding names; let's sort them out:

What is the difference between deconvolution and upsample in Caffe?

https://www.zhihu.com/question/63890195
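In short: a Deconvolution layer has learnable kernels (and is commonly frozen to bilinear weights, via a bilinear weight filler and a zero learning rate, to act as fixed upsampling), while an upsample layer has no parameters at all and just enlarges the feature map by a fixed rule. A minimal sketch of that distinction in PyTorch (my own illustration, not Caffe code; the shapes and layer settings are arbitrary):

import torch
import torch.nn as nn

x = torch.randn(1, 16, 32, 32)   # dummy N x C x H x W feature map

# Fixed upsampling: no learnable parameters, output is 2x larger
up = nn.Upsample(scale_factor=2, mode='nearest')
print(up(x).shape, sum(p.numel() for p in up.parameters()))          # [1, 16, 64, 64], 0 parameters

# Deconvolution (transposed convolution): learnable kernels, also 2x larger here
deconv = nn.ConvTranspose2d(16, 16, kernel_size=4, stride=2, padding=1)
print(deconv(x).shape, sum(p.numel() for p in deconv.parameters()))  # [1, 16, 64, 64], 4112 parameters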

Summary of methods for implementing upsampling (upsample) in Caffe

https://blog.csdn.net/chaipp0607/article/details/95599218

I also asked the author a question about the difference between

https://github.com/TimoSaemann/caffe-segnet-cudnn5/blob/master/src/caffe/layers/upsample_layer.cpp

and the upsample_layer.cpp implemented in caffe-yolov3.

The two implementations are different.

The caffe-yolov3 version:

https://pan.baidu.com/share/init?surl=3GpoYoqKSCeFX0m0ves_fQ

password: bwrd


Defining a custom Interp layer in Caffe

https://blog.csdn.net/donkey_1993/article/details/81180059

Caffe series: analysis of the forward and backward pass of the interpolation (Interp) layer in DeepLab

https://blog.csdn.net/xizero00/article/details/74330652

Using the Interp layer in Caffe

https://www.cnblogs.com/wmr95/p/8715607.html


https://github.com/hszhao/PSPNet 


Upsampling in PyTorch:

https://blog.csdn.net/qq_31622015/article/details/90573618


PyTorch: torch.nn.modules.upsampling and the interpolate function

https://www.aiuai.cn/aifarm605.html
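For reference, the usual entry point in recent PyTorch versions is torch.nn.functional.interpolate; a minimal sketch (the tensor shape here is arbitrary):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)

# nearest-neighbour upsampling, which is what darknet's upsample layer does
y_nearest = F.interpolate(x, scale_factor=2, mode='nearest')                          # -> (1, 3, 64, 64)

# bilinear upsampling, closer to Caffe's Interp layer
y_bilinear = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)   # -> (1, 3, 64, 64)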


On upsampling in YOLOv3:

https://blog.csdn.net/qq_34199326/article/details/84072505

The original darknet implementation:

https://blog.csdn.net/Lin_Danny/article/details/86514126

#include "upsample_layer.h"
#include "cuda.h"
#include "blas.h"

#include <stdio.h>

layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
    layer l = {0};
    l.type = UPSAMPLE;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    l.out_w = w*stride;
    l.out_h = h*stride;
    l.out_c = c;
    if(stride < 0){
        stride = -stride;
        l.reverse=1;
        l.out_w = w/stride;
        l.out_h = h/stride;
    }
    l.stride = stride;
    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = l.w*l.h*l.c;
    l.delta = calloc(l.outputs*batch, sizeof(float));
    l.output = calloc(l.outputs*batch, sizeof(float));

    l.forward = forward_upsample_layer;
    l.backward = backward_upsample_layer;
#ifdef GPU
    l.forward_gpu = forward_upsample_layer_gpu;
    l.backward_gpu = backward_upsample_layer_gpu;

    l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
    if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}

void resize_upsample_layer(layer *l, int w, int h)
{
    l->w = w;
    l->h = h;
    l->out_w = w*l->stride;
    l->out_h = h*l->stride;
    if(l->reverse){
        l->out_w = w/l->stride;
        l->out_h = h/l->stride;
    }
    l->outputs = l->out_w*l->out_h*l->out_c;
    l->inputs = l->h*l->w*l->c;
    l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float));
    l->output = realloc(l->output, l->outputs*l->batch*sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}

void forward_upsample_layer(const layer l, network net)
{
    fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    if(l.reverse){
        upsample_cpu(l.output, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input);
    }else{
        upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
    }
}

void backward_upsample_layer(const layer l, network net)
{
    if(l.reverse){
        upsample_cpu(l.delta, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta);
    }else{
        upsample_cpu(net.delta, l.w, l.h, l.c, l.batch, l.stride, 0, l.scale, l.delta);
    }
}

#ifdef GPU
void forward_upsample_layer_gpu(const layer l, network net)
{
    fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
    if(l.reverse){
        upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input_gpu);
    }else{
        upsample_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu);
    }
}

void backward_upsample_layer_gpu(const layer l, network net)
{
    if(l.reverse){
        upsample_gpu(l.delta_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta_gpu);
    }else{
        upsample_gpu(net.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, l.scale, l.delta_gpu);
    }
}
#endif

where upsample_cpu is implemented as:

void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h*stride; ++j){
                for(i = 0; i < w*stride; ++i){
                    /* each output pixel (j, i) reads from input pixel (j/stride, i/stride) */
                    int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if(forward) out[out_index] = scale*in[in_index];   /* nearest-neighbour copy */
                    else in[in_index] += scale*out[out_index];         /* backward: accumulate gradients */
                }
            }
        }
    }
}
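The index math maps every output pixel (j, i) back to input pixel (j/stride, i/stride): with forward == 1 this is plain nearest-neighbour upsampling scaled by scale, and with forward == 0 the gradients of each stride x stride block of output pixels are accumulated back into their shared input pixel. A rough NumPy equivalent of the forward path (my own sketch, not part of darknet):

import numpy as np

def upsample_forward(x, stride, scale=1.0):
    # Nearest-neighbour upsample of an (N, C, H, W) array, like upsample_cpu with forward=1:
    # repeat every row and every column `stride` times, then apply the scale factor.
    return scale * np.repeat(np.repeat(x, stride, axis=2), stride, axis=3)

x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)
print(upsample_forward(x, 2)[0, 0])
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]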


It comes from darknet's blas.c:

#include "blas.h"

#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int b,i,j,k;
    int out_c = c/(stride*stride);

    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h; ++j){
                for(i = 0; i < w; ++i){
                    int in_index = i + w*(j + h*(k + c*b));
                    int c2 = k % out_c;
                    int offset = k / out_c;
                    int w2 = i*stride + offset % stride;
                    int h2 = j*stride + offset / stride;
                    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else out[in_index] = x[out_index];
                }
            }
        }
    }
}

void flatten(float *x, int size, int layers, int batch, int forward)
{
    float *swap = calloc(size*layers*batch, sizeof(float));
    int i,c,b;
    for(b = 0; b < batch; ++b){
        for(c = 0; c < layers; ++c){
            for(i = 0; i < size; ++i){
                int i1 = b*layers*size + c*size + i;
                int i2 = b*layers*size + i*layers + c;
                if (forward) swap[i2] = x[i1];
                else swap[i1] = x[i2];
            }
        }
    }
    memcpy(x, swap, size*layers*batch*sizeof(float));
    free(swap);
}

void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
{
    int i;
    for(i = 0; i < n; ++i){
        c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
    }
}

void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
{
    int i;
    for(i = 0; i < n; ++i){
        if(da) da[i] += dc[i] * s[i];
        if(db) db[i] += dc[i] * (1-s[i]);
        ds[i] += dc[i] * (a[i] - b[i]);
    }
}

void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int i,j,k,b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] = s1*out[out_index] + s2*add[add_index];
                }
            }
        }
    }
}

void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        mean[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                mean[i] += x[index];
            }
        }
        mean[i] *= scale;
    }
}

void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(batch * spatial - 1);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        variance[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                variance[i] += pow((x[index] - mean[i]), 2);
            }
        }
        variance[i] *= scale;
    }
}

void l2normalize_cpu(float *x, float *dx, int batch, int filters, int spatial)
{
    int b,f,i;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < spatial; ++i){
            float sum = 0;
            for(f = 0; f < filters; ++f){
                int index = b*filters*spatial + f*spatial + i;
                sum += powf(x[index], 2);
            }
            sum = sqrtf(sum);
            for(f = 0; f < filters; ++f){
                int index = b*filters*spatial + f*spatial + i;
                x[index] /= sum;
                dx[index] = (1 - x[index]) / sum;
            }
        }
    }
}

void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int b, f, i;
    for(b = 0; b < batch; ++b){
        for(f = 0; f < filters; ++f){
            for(i = 0; i < spatial; ++i){
                int index = b*filters*spatial + f*spatial + i;
                x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
            }
        }
    }
}

void const_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
}

void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}

void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
}

void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
}

void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            if(X) X[j*NX + i] += OUT[index];
            ++index;
        }
        for(i = 0; i < NY; ++i){
            if(Y) Y[j*NY + i] += OUT[index];
            ++index;
        }
    }
}

void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            OUT[index++] = X[j*NX + i];
        }
        for(i = 0; i < NY; ++i){
            OUT[index++] = Y[j*NY + i];
        }
    }
}

void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}

void mult_add_into_cpu(int N, float *X, float *Y, float *Z)
{
    int i;
    for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i];
}

void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        float abs_val = fabs(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff < 0) ? 1 : -1;
        }
    }
}

void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = fabs(diff);
        delta[i] = diff > 0 ? 1 : -1;
    }
}

void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float t = truth[i];
        float p = pred[i];
        error[i] = (t) ? -log(p) : 0;
        delta[i] = t-p;
    }
}

void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float t = truth[i];
        float p = pred[i];
        error[i] = -t*log(p) - (1-t)*log(1-p);
        delta[i] = t-p;
    }
}

void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}

float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    float dot = 0;
    for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
    return dot;
}

void softmax(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -FLT_MAX;
    for(i = 0; i < n; ++i){
        if(input[i*stride] > largest) largest = input[i*stride];
    }
    for(i = 0; i < n; ++i){
        float e = exp(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}

void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int g, b;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
        }
    }
}

void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h*stride; ++j){
                for(i = 0; i < w*stride; ++i){
                    int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if(forward) out[out_index] = scale*in[in_index];
                    else in[in_index] += scale*out[out_index];
                }
            }
        }
    }
}

[Image processing] Image scaling with bilinear interpolation (a C implementation)

https://blog.csdn.net/weixin_43227685/article/details/88970457

#include <string.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>   // timing functions, used to measure processing speed

#define WIDTHBYTES(bits) (((bits)+31)/32*4)  // round a row size up to a multiple of 4 bytes

//#define MYDRAW_HEIGHT 986   // destination image height
//#define MYDRAW_WIDTH 1572   // destination image width
#define MYDRAW_HEIGHT 246     // destination image height
#define MYDRAW_WIDTH 393      // destination image width

typedef unsigned char BYTE;
typedef unsigned short WORD;
typedef unsigned long DWORD;
typedef long LONG;

// Bitmap file header. The 2-byte file type field is read separately, because
// including it here would change the structure's memory layout (alignment).
typedef struct tagBITMAPFILEHEADER {
    DWORD bfSize;        // file size
    WORD bfReserved1;    // reserved
    WORD bfReserved2;    // reserved
    DWORD bfOffBits;     // byte offset of the pixel data, i.e. the total size of the headers
} BITMAPFILEHEADER;

// Bitmap info header (BITMAPINFOHEADER)
typedef struct tagBITMAPINFOHEADER {
    DWORD biSize;            // size of this structure, 40
    LONG biWidth;            // bitmap width
    LONG biHeight;           // bitmap height
    WORD biPlanes;           // number of planes, always 1
    WORD biBitCount;         // bits per pixel: 1, 2, 4, 8, 16, 24 or 32
    DWORD biCompression;     // compression type, 0 means uncompressed
    DWORD biSizeImage;       // size of the pixel data in bytes
    LONG biXPelsPerMeter;    // horizontal resolution
    LONG biYPelsPerMeter;    // vertical resolution
    DWORD biClrUsed;         // number of colours used, 0 means the default (2^biBitCount)
    DWORD biClrImportant;    // number of important colours, 0 means all
} BITMAPINFOHEADER;

int main()
{
    long now = 0;
    now = clock();  // record the start time

    BITMAPFILEHEADER bitHead, writebitHead;
    BITMAPINFOHEADER bitInfoHead, writebitInfoHead;
    FILE* pfile;  // input file
    FILE* wfile;  // output file
    char strFile[50] = "C:\\testpicture\\1.bmp";      // input path; the BMP must be 24-bit true colour
    char strFilesave[50] = "C:\\testpicture\\3.bmp";  // output path
    fopen_s(&pfile, strFile, "rb");
    fopen_s(&wfile, strFilesave, "wb");

    // read the 2-byte file type
    WORD fileType;
    fread(&fileType, 1, sizeof(WORD), pfile);
    fwrite(&fileType, 1, sizeof(WORD), wfile);
    if (fileType != 0x4d42)
    {
        printf("file is not .bmp file!");
        return -1;
    }

    // read the bitmap file header
    fread(&bitHead, 1, sizeof(tagBITMAPFILEHEADER), pfile);
    writebitHead = bitHead;  // start from a copy of the source file header

    // read the bitmap info header
    fread(&bitInfoHead, 1, sizeof(BITMAPINFOHEADER), pfile);
    writebitInfoHead = bitInfoHead;  // likewise for the info header

    writebitInfoHead.biHeight = MYDRAW_HEIGHT;  // destination height
    writebitInfoHead.biWidth = MYDRAW_WIDTH;    // destination width
    int mywritewidth = WIDTHBYTES(writebitInfoHead.biWidth*writebitInfoHead.biBitCount);  // destination row size, padded to a multiple of 4 bytes
    writebitInfoHead.biSizeImage = mywritewidth*writebitInfoHead.biHeight;  // size of the destination pixel data
    writebitHead.bfSize = 54 + writebitInfoHead.biSizeImage;  // file size = 54-byte headers + pixel data

    fwrite(&writebitHead, 1, sizeof(tagBITMAPFILEHEADER), wfile);   // write the file header
    fwrite(&writebitInfoHead, 1, sizeof(BITMAPINFOHEADER), wfile);  // write the info header

    int width = bitInfoHead.biWidth;
    int height = bitInfoHead.biHeight;
    // allocate memory and load the source image
    int l_width = WIDTHBYTES(width*bitInfoHead.biBitCount);  // source row size, padded to a multiple of 4 bytes
    int write_width = WIDTHBYTES(writebitInfoHead.biWidth*writebitInfoHead.biBitCount);  // destination row size, padded to a multiple of 4 bytes
    BYTE *pColorData = (BYTE *)malloc(height*l_width);  // buffer for the source pixel data
    memset(pColorData, 0, height*l_width);
    BYTE *pColorDataMid = (BYTE *)malloc(mywritewidth*MYDRAW_HEIGHT);  // buffer for the processed pixel data
    memset(pColorDataMid, 0, mywritewidth*MYDRAW_HEIGHT);

    long nData = height*l_width;
    long write_nData = mywritewidth*MYDRAW_HEIGHT;  // length of the destination pixel data

    // read the pixel data into the buffer
    fread(pColorData, 1, nData, pfile);

    /******************* image processing: bilinear interpolation ******************/
    for (int hnum = 0; hnum < MYDRAW_HEIGHT; hnum++)
        for (int wnum = 0; wnum < MYDRAW_WIDTH; wnum++)
        {
            double d_original_img_hnum = hnum*height / (double)MYDRAW_HEIGHT;
            double d_original_img_wnum = wnum*width / (double)MYDRAW_WIDTH;
            int i_original_img_hnum = d_original_img_hnum;
            int i_original_img_wnum = d_original_img_wnum;
            double distance_to_a_x = d_original_img_wnum - i_original_img_wnum;  // horizontal distance to point A in the source image
            double distance_to_a_y = d_original_img_hnum - i_original_img_hnum;  // vertical distance to point A in the source image

            // byte offsets of the four neighbouring source pixels (start of their BGR triplets)
            int original_point_a = i_original_img_hnum*l_width + i_original_img_wnum * 3;              // point A (top-left)
            int original_point_b = i_original_img_hnum*l_width + (i_original_img_wnum + 1) * 3;        // point B (top-right)
            int original_point_c = (i_original_img_hnum + 1)*l_width + i_original_img_wnum * 3;        // point C (bottom-left)
            int original_point_d = (i_original_img_hnum + 1)*l_width + (i_original_img_wnum + 1) * 3;  // point D (bottom-right)
            // clamp at the source image border so we never read past the last row/column
            if (i_original_img_hnum + 1 >= height)
            {
                original_point_c = original_point_a;
                original_point_d = original_point_b;
            }
            if (i_original_img_wnum + 1 >= width)
            {
                original_point_b = original_point_a;
                original_point_d = original_point_c;
            }

            int pixel_point = hnum*write_width + wnum * 3;  // byte offset of the destination pixel
            // weighted sum of the four neighbours for each of the three colour channels
            pColorDataMid[pixel_point] = (BYTE)(
                pColorData[original_point_a] * (1 - distance_to_a_x)*(1 - distance_to_a_y) +
                pColorData[original_point_b] * distance_to_a_x*(1 - distance_to_a_y) +
                pColorData[original_point_c] * distance_to_a_y*(1 - distance_to_a_x) +
                pColorData[original_point_d] * distance_to_a_y*distance_to_a_x);
            pColorDataMid[pixel_point + 1] = (BYTE)(
                pColorData[original_point_a + 1] * (1 - distance_to_a_x)*(1 - distance_to_a_y) +
                pColorData[original_point_b + 1] * distance_to_a_x*(1 - distance_to_a_y) +
                pColorData[original_point_c + 1] * distance_to_a_y*(1 - distance_to_a_x) +
                pColorData[original_point_d + 1] * distance_to_a_y*distance_to_a_x);
            pColorDataMid[pixel_point + 2] = (BYTE)(
                pColorData[original_point_a + 2] * (1 - distance_to_a_x)*(1 - distance_to_a_y) +
                pColorData[original_point_b + 2] * distance_to_a_x*(1 - distance_to_a_y) +
                pColorData[original_point_c + 2] * distance_to_a_y*(1 - distance_to_a_x) +
                pColorData[original_point_d + 2] * distance_to_a_y*distance_to_a_x);
        }
    /******************* end of bilinear interpolation ******************/

    fwrite(pColorDataMid, 1, write_nData, wfile);  // write the processed pixel data back to the file
    free(pColorData);
    free(pColorDataMid);
    fclose(pfile);
    fclose(wfile);
    printf("image processing finished\n");
    printf("elapsed time: %dms\n", (int)(((double)(clock() - now)) / CLOCKS_PER_SEC * 1000));
    return 0;
}

https://blog.csdn.net/py184473894/article/details/90739167


import numpy as np

def bilinear_interpolation(img, scale):
    # size of the destination image
    dst_rows = int(img.shape[0] * scale)
    dst_cols = int(img.shape[1] * scale)
    img_dst = np.zeros([dst_rows, dst_cols])
    for i in range(dst_rows):
        for j in range(dst_cols):
            # map the destination pixel back to source coordinates
            # (centre-aligned, the usual "+0.5, /scale, -0.5" convention), clamped at 0
            src_x = max((i + 0.5) / scale - 0.5, 0)
            src_y = max((j + 0.5) / scale - 0.5, 0)
            # integer part ...
            int_x = int(src_x)
            int_y = int(src_y)
            # ... and fractional part (the interpolation weights)
            float_x = src_x - int_x
            float_y = src_y - int_y
            # neighbouring pixel indices, clamped at the image border
            int_x_p = min(int_x + 1, img.shape[0] - 1)
            int_y_p = min(int_y + 1, img.shape[1] - 1)
            # weighted sum of the four neighbours
            img_dst[i][j] = (1 - float_x) * (1 - float_y) * img[int_x][int_y] \
                + (1 - float_x) * float_y * img[int_x][int_y_p] \
                + float_x * (1 - float_y) * img[int_x_p][int_y] \
                + float_x * float_y * img[int_x_p][int_y_p]
    return img_dst
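A quick way to sanity-check the function (my own example; for a colour image you would apply it to each channel separately):

import numpy as np

# a tiny 2x2 single-channel "image"; scaling by 2 should give a smooth 4x4 gradient
img = np.array([[0.0, 100.0],
                [100.0, 200.0]])
print(bilinear_interpolation(img, 2))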

