python对于图片的风格的处理--引用github上开源项目的
Fun things I've done with Python
I've been learning Python recently and have been deeply drawn to how elegant and concise the language is. I came across an interesting open-source project on GitHub, studied it on my own, and am writing the results down here as a record of my learning journey.
Since installing and configuring a Python environment has a fairly steep learning curve for beginners, I installed Anaconda3, an all-in-one Python distribution.
This project uses an algorithm (neural style transfer) to apply the styles of famous paintings to your own photos.
Step 1
Install the dependencies: keras, h5py, and tensorflow.
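With Anaconda3 in place, the packages can typically be installed from the Anaconda Prompt (this assumes the TensorFlow backend for Keras; pip picks the versions):
pip install keras h5py tensorflow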
Step 2
Configure the runtime environment.
Download the VGG16 model weights from https://pan.baidu.com/s/1i5wYN1z and place them in the following directory:
C:\Users\Administrator\.keras\models (create the directory if it does not exist)
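As a quick sanity check before running anything, you can confirm the weights file is where Keras looks for it. A minimal sketch, assuming the downloaded file uses the standard Keras VGG16 no-top filename (adjust the name to whatever you actually downloaded):

import os
# hypothetical check: is the VGG16 weights file in ~/.keras/models?
weights_path = os.path.join(os.path.expanduser('~'), '.keras', 'models',
                            'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
print('weights file found:', os.path.isfile(weights_path))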
Step 3
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import print_function
from keras.preprocessing.image import load_img, img_to_array
from scipy.misc import imsave
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import time
import argparse
from keras.applications import vgg16
from keras import backend as K
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('style_reference_image_path', metavar='ref', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
parser.add_argument('--iter', type=int, default=20, required=False,  # sets the number of iterations
                    help='Number of iterations to run.')
parser.add_argument('--content_weight', type=float, default=0.025, required=False,
                    help='Content weight.')
parser.add_argument('--style_weight', type=float, default=1.0, required=False,
                    help='Style weight.')
parser.add_argument('--tv_weight', type=float, default=1.0, required=False,
                    help='Total Variation weight.')
args = parser.parse_args()
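# unpack each command-line argument and echo it for sanity checking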
base_image_path = args.base_image_path
print('base_image_path',base_image_path)
style_reference_image_path = args.style_reference_image_path
print('style_reference_image_path',style_reference_image_path)
result_prefix = args.result_prefix
print('result_prefix',result_prefix)
iterations = args.iter
print('iter',iterations)
# these are the weights of the different loss components
total_variation_weight = args.tv_weight
print('tv_weight',total_variation_weight)
style_weight = args.style_weight
print('style_weight',style_weight)
content_weight = args.content_weight
print('content_weight',content_weight)
# dimensions of the generated picture.
width, height = load_img(base_image_path).size
print('width : %s,height : %s' %(width,height) )
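# fix the generated image at 400 rows; the column count is scaled so the
# base image's aspect ratio is preserved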
img_nrows = 400
print('img_nrows',img_nrows)
img_ncols = int(width * img_nrows / height)
print('img_ncols',img_ncols)
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path))
style_reference_image = K.variable(preprocess_image(style_reference_image_path))
# this will contain our generated image
if K.image_data_format() == 'channels_first':
    combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
else:
    combination_image = K.placeholder((1, img_nrows, img_ncols, 3))
# combine the 3 images into a single Keras tensor
input_tensor = K.concatenate([base_image,
                              style_reference_image,
                              combination_image], axis=0)
# build the VGG16 network with our 3 images as input
# the model will be loaded with pre-trained ImageNet weights
model = vgg16.VGG16(input_tensor=input_tensor,
                    weights='imagenet', include_top=False)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions
# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination):
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_nrows * img_ncols
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    return K.sum(K.square(combination - base))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
# combine these loss functions into a single scalar
loss = K.variable(0.)
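# note: newer Keras/TensorFlow versions may reject in-place += on a K.variable;
# if so, rewriting each update as loss = loss + ... behaves the same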
layer_features = outputs_dict['block4_conv2']
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)
feature_layers = ['block1_conv1', 'block2_conv1',
                  'block3_conv1', 'block4_conv1',
                  'block5_conv1']
for layer_name in feature_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
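# K.gradients may return a single tensor or a list depending on the backend,
# so both cases are normalized into the outputs list below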
outputs = [loss]
if isinstance(grads, (list, tuple)):
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self, x):
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
if K.image_data_format() == 'channels_first':
    x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128.
else:
    x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128.
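# the generated image starts as random noise; subtracting 128 roughly centers
# the values, in line with the zero-centered range VGG16 preprocessing produces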
for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.copy())
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
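A note on the imsave import at the top of the script: scipy.misc.imsave was deprecated and removed in newer SciPy releases, so on a recent environment the import may fail. A minimal substitution, assuming the imageio package is installed:

# drop-in replacement for scipy.misc.imsave on newer SciPy versions
from imageio import imwrite as imsave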
Step 4
Put the .py file, the style template image, and the photo you want to transform into the same folder.
Launch the Anaconda Prompt, cd into that folder, and run:
python <script name>.py <photo to transform> <style template image> <output prefix>   (the output prefix needs no file extension)
python style.py ./yxc.jpg ./style.jpg ./yxc_style
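The optional flags defined by argparse above can be appended to tune the result, for example (the values here are illustrative):
python style.py ./yxc.jpg ./style.jpg ./yxc_style --iter 10 --style_weight 2.0 --tv_weight 1.0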
Style template image (image omitted)
Reposted from: https://my.oschina.net/u/3689521/blog/1560837