
Convolution and Pooling Computation in Convolutional Neural Networks

import numpy as np
# Filter W0 (3x3x3): three 3x3 slices, one per input channel
filter_w0_1 = np.array([[-1,1,0],
[0,1,0],
[0,1,1]])
filter_w0_2 = np.array([[-1,-1,0],
[0,0,0],
[0,-1,0]])
filter_w0_3 = np.array([[0,0,-1],
[0,1,0],
[1,-1,-1]])
# Filter W1 (3x3x3): three 3x3 slices, one per input channel
filter_w1_1 = np.array([[1,1,-1],
[-1,-1,1],
[0,-1,1]])
filter_w1_2 = np.array([[0,1,0],
[-1,0,-1],
[-1,1,0]])
filter_w1_3 = np.array([[-1,0,0],
[-1,0,1],
[-1,0,0]])
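# (Illustration only, not used by the code below.) The three slices of each filter
# can be stacked into the full 3x3x3 kernels the comments above refer to:
w0 = np.stack([filter_w0_1, filter_w0_2, filter_w0_3])   # shape (3, 3, 3)
w1 = np.stack([filter_w1_1, filter_w1_2, filter_w1_3])   # shape (3, 3, 3)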
# Input volume: three 5x5 channels, each with one layer of zero padding (pad = 1)
# Zero padding helps the filters extract features at the image borders
input_img1 = np.array([
       [0,0,0,0,0,0,0],
       [0,0,1,1,0,2,0],
       [0,2,2,2,2,1,0],
       [0,1,0,0,2,0,0],
       [0,0,1,1,0,0,0],
       [0,1,2,0,0,2,0],
       [0,0,0,0,0,0,0]])
input_img2 = np.array([
       [0,0,0,0,0,0,0],
       [0,1,0,2,2,0,0],
       [0,0,0,0,2,0,0],
       [0,1,2,1,2,1,0],
       [0,1,0,0,0,0,0],
       [0,1,2,1,1,1,0],
       [0,0,0,0,0,0,0]])
input_img3 = np.array([
       [0,0,0,0,0,0,0],
       [0,2,1,2,0,0,0],
       [0,1,0,0,1,0,0],
       [0,0,2,1,0,1,0],
       [0,0,1,2,2,2,0],
       [0,2,1,0,0,1,0],
       [0,0,0,0,0,0,0]])
# Bias terms: W0 uses a bias of 1, W1 uses a bias of 0, added element-wise
# to the 3x3 output feature maps
bias_1 = np.ones((3, 3))
bias_2 = np.zeros((3, 3))

# Convolution of one 2D channel with one kernel slice; stride is the step size (2 here)
def cov(img, f, stride):
    in_h, in_w = img.shape
    k_h, k_w = f.shape
    out_h = (in_h - k_h) // stride + 1
    out_w = (in_w - k_w) // stride + 1
    arr = np.zeros((out_h, out_w))
    for g in range(out_h):
        for t in range(out_w):
            s = 0
            for i in range(k_h):
                for j in range(k_w):
                    s += img[i + g*stride][j + t*stride] * f[i][j]
            arr[g][t] = s
    return arr
# Element-wise addition of two matrices of the same shape
def matrix_add(matrix1, matrix2):
    rows, cols = len(matrix1), len(matrix1[0])
    return [[matrix1[i][j] + matrix2[i][j] for j in range(cols)] for i in range(rows)]
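
# Optional cross-check of cov() with vectorized NumPy (assumption: NumPy >= 1.20,
# which provides sliding_window_view; this helper is not part of the original post).
from numpy.lib.stride_tricks import sliding_window_view

def cov_vectorized(img, f, stride):
    # Take every kernel-sized window, keep every stride-th one, contract with the kernel
    windows = sliding_window_view(img, f.shape)[::stride, ::stride]
    return np.einsum('ijkl,kl->ij', windows, f)

# e.g. np.array_equal(cov(input_img1, filter_w0_1, 2),
#                     cov_vectorized(input_img1, filter_w0_1, 2))  # should print True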

print("经常第一层卷积后:")
output_w0_1 = cov(input_img1,filter_w0_1,2)
output_w0_2 = cov(input_img2,filter_w0_2,2)
output_w0_3 = cov(input_img3,filter_w0_3,2)
output_add_1 = matrix_add(output_w0_1,output_w0_2)
output_add_2 = matrix_add(output_add_1,output_w0_3)
output_add_3 = matrix_add(output_add_2,bias_1)
print(output_add_3)
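# Sanity check of the top-left element: multiply the top-left 3x3 patch of each
# padded channel with its kernel slice and sum the non-zero products:
#   channel 1: 2*1 + 2*1          = 4
#   channel 2: no non-zero products = 0
#   channel 3: 2*1 + 1*(-1)       = 1
#   plus bias 1                   -> output_add_3[0][0] = 6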

print("经常第二层卷积后:")
output_w1_1 = cov(input_img1,filter_w1_1,2)
output_w1_2 = cov(input_img2,filter_w1_2,2)
output_w1_3 = cov(input_img3,filter_w1_3,2)
output_add_4 = matrix_add(output_w1_1,output_w1_2)
output_add_5 = matrix_add(output_add_4,output_w1_3)
output_add_6 = matrix_add(output_add_5,bias_2)
print(output_add_6)

# Max pooling and average pooling with stride 1 (a minimal sketch follows below)
# Reference: https://blog.csdn.net/MrCharles/article/details/105299448?utm_medium=distribute.pc_relevant_t0.none-task-blog-BlogCommendFromMachineLearnPai2-1.add_param_isCf&depth_1-utm_source=distribute.pc_relevant_t0.none-task-blog-BlogCommendFromMachineLearnPai2-1.add_param_isCf
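
# The original post stops at the pooling step; below is a minimal sketch of max and
# average pooling with stride 1, assuming a 2x2 window (the window size is not
# specified in the text), applied to the W0 feature map computed above.
def pool(feature_map, size, stride, mode="max"):
    in_h, in_w = feature_map.shape
    out_h = (in_h - size) // stride + 1
    out_w = (in_w - size) // stride + 1
    out = np.zeros((out_h, out_w))
    for r in range(out_h):
        for c in range(out_w):
            patch = feature_map[r*stride:r*stride+size, c*stride:c*stride+size]
            out[r, c] = patch.max() if mode == "max" else patch.mean()
    return out

feature_map_w0 = np.array(output_add_3)
print("Max pooling (2x2 window, stride 1):")
print(pool(feature_map_w0, 2, 1, "max"))
print("Average pooling (2x2 window, stride 1):")
print(pool(feature_map_w0, 2, 1, "avg"))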
Author: 王荣胜
Source: https://sqdxwz.top/
License: This article is distributed under the Creative Commons Attribution-ShareAlike 4.0 International license.