Posted in Python on July 30, 2018
Method 1:
Print all variables in a loop
Template
for (x, y) in zip(tf.global_variables(), sess.run(tf.global_variables())): print '\n', x, y
Example
# coding=utf-8
import tensorflow as tf
import numpy as np


def func(in_put, layer_name, is_training=True):
    # A batch-norm layer; its beta / moving_mean / moving_variance
    # variables are the ones printed below.
    with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
        bn = tf.contrib.layers.batch_norm(inputs=in_put,
                                          decay=0.9,
                                          is_training=is_training,
                                          updates_collections=None)
    return bn


def main():
    with tf.Graph().as_default():
        # input_x
        input_x = tf.placeholder(dtype=tf.float32, shape=[1, 4, 4, 1])
        i_p = np.random.uniform(low=0, high=255, size=[1, 4, 4, 1])
        # outputs
        output = func(input_x, 'my', is_training=True)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            t = sess.run(output, feed_dict={input_x: i_p})
            # Method 1: print all variables in a loop
            for (x, y) in zip(tf.global_variables(), sess.run(tf.global_variables())):
                print '\n', x, y


if __name__ == "__main__":
    main()
2017-09-29 10:10:22.714213: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1052] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1070, pci bus id: 0000:01:00.0, compute capability: 6.1)

<tf.Variable 'my/BatchNorm/beta:0' shape=(1,) dtype=float32_ref> [ 0.]

<tf.Variable 'my/BatchNorm/moving_mean:0' shape=(1,) dtype=float32_ref> [ 13.46412563]

<tf.Variable 'my/BatchNorm/moving_variance:0' shape=(1,) dtype=float32_ref> [ 452.62246704]

Process finished with exit code 0
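If the graph holds many variables, it can be more convenient to print only those created under a particular scope (here, the 'my' scope from the example above). The following is a minimal sketch of that variation, not part of the original article, assuming the same TF 1.x setup and an open session sess whose variables have already been initialized:

# Sketch: print only the variables created under the 'my' scope (TF 1.x).
# Assumes an open tf.Session `sess` with variables already initialized.
scope_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='my')
for var, value in zip(scope_vars, sess.run(scope_vars)):
    print '\n', var.name, value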
Method 2:
Print a variable by specifying its name
Template
print 'my/BatchNorm/beta:0', (sess.run('my/BatchNorm/beta:0'))
Example
# coding=utf-8
import tensorflow as tf
import numpy as np


def func(in_put, layer_name, is_training=True):
    with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
        bn = tf.contrib.layers.batch_norm(inputs=in_put,
                                          decay=0.9,
                                          is_training=is_training,
                                          updates_collections=None)
    return bn


def main():
    with tf.Graph().as_default():
        # input_x
        input_x = tf.placeholder(dtype=tf.float32, shape=[1, 4, 4, 1])
        i_p = np.random.uniform(low=0, high=255, size=[1, 4, 4, 1])
        # outputs
        output = func(input_x, 'my', is_training=True)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            t = sess.run(output, feed_dict={input_x: i_p})
            # Method 2: print specific variables by name
            print 'my/BatchNorm/beta:0', (sess.run('my/BatchNorm/beta:0'))
            print 'my/BatchNorm/moving_mean:0', (sess.run('my/BatchNorm/moving_mean:0'))
            print 'my/BatchNorm/moving_variance:0', (sess.run('my/BatchNorm/moving_variance:0'))


if __name__ == "__main__":
    main()
2017-09-29 10:12:41.374055: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1052] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1070, pci bus id: 0000:01:00.0, compute capability: 6.1)
my/BatchNorm/beta:0 [ 0.]
my/BatchNorm/moving_mean:0 [ 8.08649635]
my/BatchNorm/moving_variance:0 [ 368.03442383]
Process finished with exit code 0
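Passing the name string straight to sess.run works because Session.run accepts tensor names as fetches. If you would rather hold on to the tensor object itself (for example, to fetch it repeatedly), you can look it up from the graph first. A minimal sketch under the same assumptions as above (an open session sess on the default graph):

# Sketch: look up a variable's tensor by name, then run it (TF 1.x).
# Assumes an open tf.Session `sess` on the default graph.
beta = tf.get_default_graph().get_tensor_by_name('my/BatchNorm/beta:0')
print 'my/BatchNorm/beta:0', sess.run(beta)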
That is everything I have to share on printing TensorFlow variables held in memory. I hope it serves as a useful reference, and thank you for supporting 三水点靠木.
How to print TensorFlow variables held in memory
- Author -
JNingWei