原始程式碼:
def generate_pattern(layer_name, filter_index, size=150):
    """Generate an input image that maximally activates one conv filter.

    Starts from a noisy gray image and runs 40 steps of gradient ascent
    in input space, maximizing the mean activation of filter
    ``filter_index`` in layer ``layer_name``.

    Args:
        layer_name: Name of the layer to inspect (looked up on the
            module-level ``model``).
        filter_index: Channel index of the filter within that layer.
        size: Height and width, in pixels, of the generated square image.

    Returns:
        The result of ``deprocess_image`` applied to the optimized image
        (a displayable uint8-style array — see ``deprocess_image``).
    """
    # Loss: mean activation of the n-th filter of the considered layer.
    # NOTE: the original paste contained U+00A0 (non-breaking space)
    # characters and lost indentation; this version uses plain ASCII
    # whitespace so it parses.
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    # Gradient of the loss with respect to the input image.
    grads = K.gradients(loss, model.input)[0]
    # Normalize the gradient; 1e-5 guards against division by zero.
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    # Backend function mapping an input image to (loss, gradient).
    iterate = K.function([model.input], [loss, grads])
    # Start from a gray image with some noise.
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    # Run gradient ascent for 40 steps.
    step = 1.
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    img = input_img_data[0]
    return deprocess_image(img)
# 構建一個最大化激活的 損失函數 loss function
# 考慮的層的 第n個 filter
layer_output = model.get_layer(layer_name).
loss = K.mean(layer_output[:, :, :, filter_index])
# 計算這種損失的輸入圖像的梯度
grads = K.gradients(loss, model.input)[0]
# Normalization gradient 梯度
grads /= (K.sqrt(K.mean(K.square(grads)
# 函數返回 給定 輸入圖片的 損失 和 梯度
iterate = K.function([model.input], [loss, grads])
# 帶有一些噪音的灰色圖像
input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
# Run 梯度上升 40步
step = 1.
for i in range(40):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
img = input_img_data[0]
return deprocess_image(img)
出現錯誤
Cell In[34], line 3 # 考慮的層的 第n個 filter ^ SyntaxError: invalid non-printable character U+00A0
修正成這樣就好了
def generate_pattern(layer_name, filter_index, size=150):
    """Generate an input image that maximally activates one conv filter.

    This is the corrected version of the function: the one-line paste was
    not runnable because the first ``#`` comment swallowed the rest of the
    line. Re-expanded here into valid, indented Python.

    Args:
        layer_name: Name of the layer to inspect (looked up on ``model``).
        filter_index: Channel index of the filter within that layer.
        size: Height and width, in pixels, of the generated square image.

    Returns:
        The result of ``deprocess_image`` applied to the optimized image.
    """
    # Construct a loss that maximizes the activation of the nth filter
    # in the considered layer.
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    # Compute the gradient of this loss with respect to the input image.
    grads = K.gradients(loss, model.input)[0]
    # Normalize the gradient (1e-5 avoids division by zero).
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    # Function returning the loss and gradient for a given input image.
    iterate = K.function([model.input], [loss, grads])
    # Gray image with some noise as the starting point.
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    # Run gradient ascent for 40 steps.
    step = 1.
    for i in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    img = input_img_data[0]
    return deprocess_image(img)
评论
发表评论