CNN+LSTM+CTC
Requirement: investigate an implementation of CNN+LSTM+CTC.

Solution: follow a reference implementation from GitHub.
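As orientation for the listing below: the script renders 32 x 256 images of variable-length digit strings, feeds each image to an LSTM as 256 time steps of 32-pixel columns, projects each step to 12 classes, and trains with CTC loss so no per-frame alignment is needed. The following shape walk-through is our own summary, with dimensions taken from the code:

# Data flow of the script below (dimensions from the code; summary is ours):
#   image:        32 x 256                  (height x width)
#   RNN input:    256 steps x 32 features   (each image column is one step)
#   LSTM output:  256 x 64                  (num_hidden = 64)
#   projection:   256 x 12                  (num_classes = 10 digits + 2)
#   CTC loss:     aligns the 256-step logits with the variable-length label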
Example code:
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
tf CNN+LSTM+CTC: train a model to recognize images of variable-length digit strings
@author: pengyuanjie
"""
from com.shenl.ocrTensorflowCnn.genIDCard import *
import numpy as np
import time
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
'''
TF_CPP_MIN_LOG_LEVEL sets TensorFlow's logging level:
- 0: show all logs (default)
- 1: show info, warning and error logs
- 2: show warning and error logs
- 3: show error logs only
'''

# Constants
# Image size: 32 x 256 (height x width)
OUTPUT_SHAPE = (32, 256)

# Maximum number of training epochs
num_epochs = 10000
num_hidden = 64
num_layers = 1

# Image generator object
obj = gen_id_card()
num_classes = obj.len + 1 + 1  # 10 digits + blank + CTC blank

# Learning-rate schedule
INITIAL_LEARNING_RATE = 1e-3
DECAY_STEPS = 5000
REPORT_STEPS = 100
LEARNING_RATE_DECAY_FACTOR = 0.9  # the learning-rate decay factor
MOMENTUM = 0.9

DIGITS = '0123456789'
BATCHES = 10
BATCH_SIZE = 64
TRAIN_SIZE = BATCHES * BATCH_SIZE


def decode_sparse_tensor(sparse_tensor):
    """Split a sparse (indices, values, shape) tuple into one label sequence per batch item."""
    decoded_indexes = list()
    current_i = 0
    current_seq = []
    for offset, i_and_index in enumerate(sparse_tensor[0]):
        i = i_and_index[0]
        if i != current_i:
            decoded_indexes.append(current_seq)
            current_i = i
            current_seq = list()
        current_seq.append(offset)
    decoded_indexes.append(current_seq)
    result = []
    for index in decoded_indexes:
        result.append(decode_a_seq(index, sparse_tensor))
    return result


def decode_a_seq(indexes, spars_tensor):
    decoded = []
    for m in indexes:
        ch = DIGITS[spars_tensor[1][m]]  # renamed from `str`, which shadowed the builtin
        decoded.append(ch)
    return decoded


def report_accuracy(decoded_list, test_targets):
    original_list = decode_sparse_tensor(test_targets)
    detected_list = decode_sparse_tensor(decoded_list)
    true_number = 0
    if len(original_list) != len(detected_list):
        print("len(original_list)", len(original_list), "len(detected_list)", len(detected_list),
              " test and detect lengths don't match")
        return
    print("T/F: original(length) <-------> detected(length)")
    for idx, number in enumerate(original_list):
        detect_number = detected_list[idx]
        hit = (number == detect_number)
        print(hit, number, "(", len(number), ") <-------> ", detect_number, "(", len(detect_number), ")")
        if hit:
            true_number = true_number + 1
    print("Test Accuracy:", true_number * 1.0 / len(original_list))


# Convert a list of label sequences into a sparse representation
def sparse_tuple_from(sequences, dtype=np.int32):
    """Create a sparse representation of `sequences`.
    Args:
        sequences: a list of lists of type dtype, where each element is a sequence
    Returns:
        A tuple (indices, values, shape)
    """
    indices = []
    values = []
    for n, seq in enumerate(sequences):
        indices.extend(zip([n] * len(seq), range(len(seq))))  # range works in both Python 2 and 3
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)
    return indices, values, shape


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.5)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W, stride=(1, 1), padding='SAME'):
    return tf.nn.conv2d(x, W, strides=[1, stride[0], stride[1], 1], padding=padding)


def max_pool(x, ksize=(2, 2), stride=(2, 2)):
    return tf.nn.max_pool(x, ksize=[1, ksize[0], ksize[1], 1],
                          strides=[1, stride[0], stride[1], 1], padding='SAME')


def avg_pool(x, ksize=(2, 2), stride=(2, 2)):
    return tf.nn.avg_pool(x, ksize=[1, ksize[0], ksize[1], 1],
                          strides=[1, stride[0], stride[1], 1], padding='SAME')
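# Illustration (ours, not part of the original script) of what
# sparse_tuple_from produces: two label sequences of different lengths become
# the (indices, values, shape) triple that tf.SparseTensor / tf.nn.ctc_loss
# expect. Call _demo_sparse_tuple() to see it; it is never called by train().
def _demo_sparse_tuple():
    indices, values, shape = sparse_tuple_from([[1, 2, 3], [4, 5]])
    print(indices)  # rows are (batch_item, position) pairs: (0,0) (0,1) (0,2) (1,0) (1,1)
    print(values)   # [1 2 3 4 5] -- the labels, flattened
    print(shape)    # [2 3] -- batch of 2, longest sequence has 3 labels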
# Generate one training batch
def get_next_batch(batch_size=128):
    obj = gen_id_card()
    # (batch_size, 256, 32)
    inputs = np.zeros([batch_size, OUTPUT_SHAPE[1], OUTPUT_SHAPE[0]])
    codes = []
    for i in range(batch_size):
        # Generate a string of variable length
        # image, text, vec = obj.gen_image(True)  # True should not be passed here
        image, text, vec = obj.gen_image()
        # np.transpose: (32*256,) => (32, 256) => (256, 32)
        inputs[i, :] = np.transpose(image.reshape((OUTPUT_SHAPE[0], OUTPUT_SHAPE[1])))
        codes.append(list(text))
    targets = [np.asarray(i) for i in codes]
    # print(targets)  # noisy debug output
    sparse_targets = sparse_tuple_from(targets)
    # (batch_size,) -- every entry is 256
    seq_len = np.ones(inputs.shape[0]) * OUTPUT_SHAPE[1]
    return inputs, sparse_targets, seq_len


# Define the CNN that processes the image
def convolutional_layers():
    # Input data, shape [batch_size, max_stepsize, num_features]
    inputs = tf.placeholder(tf.float32, [None, None, OUTPUT_SHAPE[0]])

    # First convolutional layer, 32*256*1 => 16*128*48
    W_conv1 = weight_variable([5, 5, 1, 48])
    b_conv1 = bias_variable([48])
    x_expanded = tf.expand_dims(inputs, 3)
    h_conv1 = tf.nn.relu(conv2d(x_expanded, W_conv1) + b_conv1)
    h_pool1 = max_pool(h_conv1, ksize=(2, 2), stride=(2, 2))

    # Second layer, 16*128*48 => 16*64*64
    W_conv2 = weight_variable([5, 5, 48, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool(h_conv2, ksize=(2, 1), stride=(2, 1))

    # Third layer, 16*64*64 => 8*32*128
    W_conv3 = weight_variable([5, 5, 64, 128])
    b_conv3 = bias_variable([128])
    h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
    h_pool3 = max_pool(h_conv3, ksize=(2, 2), stride=(2, 2))

    # Fully connected layer
    W_fc1 = weight_variable([16 * 8 * OUTPUT_SHAPE[1], OUTPUT_SHAPE[1]])
    b_fc1 = bias_variable([OUTPUT_SHAPE[1]])
    conv_layer_flat = tf.reshape(h_pool3, [-1, 16 * 8 * OUTPUT_SHAPE[1]])
    features = tf.nn.relu(tf.matmul(conv_layer_flat, W_fc1) + b_fc1)

    # (batch_size, 256)
    shape = tf.shape(features)
    features = tf.reshape(features, [shape[0], OUTPUT_SHAPE[1], 1])  # batch_size * OUTPUT_SHAPE[1] * 1
    return inputs, features


def get_train_model():
    # Note: convolutional_layers() is defined above but, as in the original,
    # not wired in here -- the raw image columns are fed straight to the LSTM.
    # features = convolutional_layers()
    # print features.get_shape()
    inputs = tf.placeholder(tf.float32, [None, None, OUTPUT_SHAPE[0]])

    # Sparse placeholder required by ctc_loss
    targets = tf.sparse_placeholder(tf.int32)

    # 1-D vector of sequence lengths, shape [batch_size]
    seq_len = tf.placeholder(tf.int32, [None])

    # Define the LSTM network
    cell = tf.contrib.rnn.LSTMCell(num_hidden, state_is_tuple=True)
    stack = tf.contrib.rnn.MultiRNNCell([cell] * num_layers, state_is_tuple=True)
    # The original passed `cell` here, leaving `stack` unused; passing `stack`
    # matches the intent of num_layers (identical behavior for num_layers = 1).
    outputs, _ = tf.nn.dynamic_rnn(stack, inputs, seq_len, dtype=tf.float32)

    shape = tf.shape(inputs)
    batch_s, max_timesteps = shape[0], shape[1]
    outputs = tf.reshape(outputs, [-1, num_hidden])
    W = tf.Variable(tf.truncated_normal([num_hidden, num_classes], stddev=0.1), name="W")
    b = tf.Variable(tf.constant(0., shape=[num_classes]), name="b")
    logits = tf.matmul(outputs, W) + b
    logits = tf.reshape(logits, [batch_s, -1, num_classes])
    # ctc_loss expects time-major logits: (max_time, batch_size, num_classes)
    logits = tf.transpose(logits, (1, 0, 2))
    return logits, inputs, targets, seq_len, W, b
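# Sketch (ours, never called by the script) of how one generated batch lines
# up with the three placeholders returned by get_train_model. If run on its
# own it would build a fresh copy of the graph.
def _demo_feed():
    logits, inputs, targets, seq_len, W, b = get_train_model()
    batch_inputs, batch_targets, batch_seq_len = get_next_batch(4)
    # batch_inputs:  (4, 256, 32) -- batch, time steps (width), features (height)
    # batch_targets: sparse (indices, values, shape) triple of digit labels
    # batch_seq_len: (4,) -- all 256, one length per example
    return {inputs: batch_inputs, targets: batch_targets, seq_len: batch_seq_len}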
def train():
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                               global_step,
                                               DECAY_STEPS,
                                               LEARNING_RATE_DECAY_FACTOR,
                                               staircase=True)
    logits, inputs, targets, seq_len, W, b = get_train_model()

    loss = tf.nn.ctc_loss(labels=targets, inputs=logits, sequence_length=seq_len)
    cost = tf.reduce_mean(loss)

    # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=MOMENTUM).minimize(cost, global_step=global_step)
    # Minimizing the unreduced `loss` takes gradients of its sum over the
    # batch; minimizing `cost` would average them instead.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)

    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len, merge_repeated=False)
    # `acc` is the mean edit distance between decode and target: lower is better
    acc = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))

    init = tf.global_variables_initializer()

    def do_report():
        test_inputs, test_targets, test_seq_len = get_next_batch(BATCH_SIZE)
        test_feed = {inputs: test_inputs, targets: test_targets, seq_len: test_seq_len}
        dd, log_probs, accuracy = session.run([decoded[0], log_prob, acc], test_feed)
        report_accuracy(dd, test_targets)
        # decoded_list = decode_sparse_tensor(dd)

    def do_batch():
        train_inputs, train_targets, train_seq_len = get_next_batch(BATCH_SIZE)
        feed = {inputs: train_inputs, targets: train_targets, seq_len: train_seq_len}
        b_loss, b_targets, b_logits, b_seq_len, b_cost, steps, _ = session.run(
            [loss, targets, logits, seq_len, cost, global_step, optimizer], feed)
        # print b_loss
        # print b_targets, b_logits, b_seq_len
        print(b_cost, steps)
        if steps > 0 and steps % REPORT_STEPS == 0:
            do_report()
            # save_path = saver.save(session, "ocr.model", global_step=steps)
            # print(save_path)
        return b_cost, steps

    with tf.Session() as session:
        session.run(init)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
        for curr_epoch in range(num_epochs):
            print("Epoch.......", curr_epoch)
            train_cost = train_ler = 0
            for batch in range(BATCHES):
                start = time.time()
                c, steps = do_batch()
                train_cost += c * BATCH_SIZE
                seconds = time.time() - start
                print("Step:", steps, ", batch seconds:", seconds)
            train_cost /= TRAIN_SIZE

            train_inputs, train_targets, train_seq_len = get_next_batch(BATCH_SIZE)
            val_feed = {inputs: train_inputs, targets: train_targets, seq_len: train_seq_len}
            val_cost, val_ler, lr, steps = session.run([cost, acc, learning_rate, global_step],
                                                       feed_dict=val_feed)
            log = "Epoch {}/{}, steps = {}, train_cost = {:.3f}, train_ler = {:.3f}, " \
                  "val_cost = {:.3f}, val_ler = {:.3f}, time = {:.3f}s, learning_rate = {}"
            print(log.format(curr_epoch + 1, num_epochs, steps, train_cost, train_ler,
                             val_cost, val_ler, time.time() - start, lr))


if __name__ == '__main__':
    inputs, sparse_targets, seq_len = get_next_batch(2)
    decode_sparse_tensor(sparse_targets)
    train()
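What does ctc_beam_search_decoder give back? Conceptually it applies CTC's collapsing rule to the frame-wise predictions: merge repeated labels, then drop blanks. A toy greedy version of that rule (our sketch, not in the script; the blank index follows tf.nn.ctc_loss's convention of num_classes - 1):

# Toy illustration (ours) of the CTC collapsing rule.
def ctc_collapse(frame_labels, blank):
    out = []
    prev = None
    for lab in frame_labels:
        if lab != prev and lab != blank:
            out.append(lab)
        prev = lab
    return out

# With blank = 10, the frame sequence 7 7 blank 7 1 1 blank
# collapses to the digit string "771".
print(ctc_collapse([7, 7, 10, 7, 1, 1, 10], blank=10))  # -> [7, 7, 1]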
Summary

The above is the full CNN+LSTM+CTC example: generated images of variable-length digit strings are fed column-by-column to an LSTM, trained with CTC loss, and decoded with beam search. Hopefully it helps you solve the problem you ran into.