2017-11-17 8 views
1

간단한 작업: N 에포크 동안 학습을 실행하고, 각 에포크가 끝날 때마다 검증 세트에 대한 정확도를 계산합니다. 에포크 크기는 전체 학습 세트와 같거나 미리 정의된 반복 횟수일 수 있습니다. 검증 중에는 검증 세트의 모든 입력이 정확히 한 번씩만 평가되어야 합니다. (질문 제목: TensorFlow Dataset API를 학습 및 검증 세트에 사용하는 방법)

one_shot_iterators, 초기화 가능한 반복자 및/또는 해당 작업의 핸들을 함께 혼합하는 가장 좋은 방법은 무엇입니까?

0) 보조 코드 공유: 이 솔루션은 예상보다 지저분해서 두 부분으로 나누어 설명합니다.

def build_training_dataset(): 
    """Build and return the training dataset (stub -- body omitted in the question)."""
    pass 

def build_validation_dataset(): 
    """Build and return the validation dataset (stub -- body omitted in the question)."""
    pass 

def construct_train_op(dataset): 
    """Build the training op from a dataset (stub -- body omitted in the question)."""
    pass 

def magic(training_dataset, validation_dataset): 
    """Combine the training and validation datasets into one dataset (stub).

    BUG FIX: the original signature took a single ``iterator`` argument, but
    the only call site passes two datasets -- calling it would raise TypeError.
    """
    pass 

# Training configuration.
USE_CUSTOM_EPOCH_SIZE = True   # if True, an epoch is CUSTOM_EPOCH_SIZE iterations, not one dataset pass
CUSTOM_EPOCH_SIZE = 60         # iterations per epoch when USE_CUSTOM_EPOCH_SIZE is set
MAX_EPOCHS = 100               # total number of epochs to run


training_dataset = build_training_dataset() 
validation_dataset = build_validation_dataset() 


# Magic goes here to build a nice one-instance dataset
dataset = magic(training_dataset, validation_dataset) 

train_op = construct_train_op(dataset) 

# Run N epochs in which the training dataset is traversed, followed by the
# validation dataset.
with tf.Session() as sess: 
    # BUG FIX: the original iterated "for epoch in MAX_EPOCHS" over a bare
    # int, which raises TypeError; iterate over range(MAX_EPOCHS) instead.
    for epoch in range(MAX_EPOCHS): 

        # train
        if USE_CUSTOM_EPOCH_SIZE: 
            for _ in range(CUSTOM_EPOCH_SIZE): 
                sess.run(train_op) 
        else: 
            while True: 
                # I guess smth like this:
                try: 
                    sess.run(train_op) 
                except tf.errors.OutOfRangeError: 
                    break  # we are done with the epoch

        # validation
        validation_predictions = [] 
        while True: 
            try: 
                # BUG FIX: np.append returns a NEW array; the original
                # discarded the result, leaving the list empty.
                validation_predictions = np.append(
                    validation_predictions, sess.run(train_op))  # but for validation this time
            except tf.errors.OutOfRangeError: 
                # BUG FIX: missing comma in the format tuple was a
                # SyntaxError, and list has no .mean() -- use np.mean.
                print('epoch %d finished with accuracy: %f'
                      % (epoch, np.mean(validation_predictions))) 
                break 

답변

4

여기 제가 생각하는 방법의 뼈대 예시입니다:

에포크 크기가 학습 세트 크기와 동일한 경우:

# Training configuration.
USE_CUSTOM_EPOCH_SIZE = True   # if True, an epoch is CUSTOM_EPOCH_SIZE iterations
CUSTOM_EPOCH_SIZE = 60         # iterations per epoch when USE_CUSTOM_EPOCH_SIZE is set
MAX_EPOCHS = 100               # total number of epochs to run

TRAIN_SIZE = 500               # number of examples in the toy training set
VALIDATION_SIZE = 145          # number of examples in the toy validation set
BATCH_SIZE = 64 


def construct_train_op(batch): 
    """Identity stand-in for a real training op: simply passes the batch through."""
    op = batch
    return op


def build_train_dataset(): 
    """Toy training set: TRAIN_SIZE integers perturbed by uniform noise, batched."""
    ds = tf.data.Dataset.range(TRAIN_SIZE)
    ds = ds.map(lambda v: v + tf.random_uniform([], -10, 10, tf.int64))
    return ds.batch(BATCH_SIZE)

def build_test_dataset(): 
    """Toy validation set: the integers 0..VALIDATION_SIZE-1, batched."""
    ds = tf.data.Dataset.range(VALIDATION_SIZE)
    return ds.batch(BATCH_SIZE)

1)

# Dataset construction. CHANGE 1: .repeat() makes the training set loop
# forever, so no re-initialization (and no OutOfRangeError) during training.
training_dataset = build_train_dataset().repeat() 
validation_dataset = build_test_dataset() 

# A string handle lets us choose which dataset feeds the shared iterator
# simply by passing a value in feed_dict.
handle = tf.placeholder(tf.string, shape=[]) 
iterator = tf.data.Iterator.from_string_handle(
    handle, training_dataset.output_types, training_dataset.output_shapes) 
next_element = iterator.get_next() 


train_op = construct_train_op(next_element) 

# CHANGE 2: a one-shot iterator suffices for the endlessly-repeating train set.
training_iterator = training_dataset.make_one_shot_iterator() 
validation_iterator = validation_dataset.make_initializable_iterator() 

with tf.Session() as sess: 
    training_handle = sess.run(training_iterator.string_handle()) 
    validation_handle = sess.run(validation_iterator.string_handle()) 

    for epoch in range(MAX_EPOCHS): 
        # Train. CHANGE 3: no initialization and no try/except needed --
        # the repeated dataset never runs out.
        for _ in range(CUSTOM_EPOCH_SIZE): 
            train_output = sess.run(train_op, feed_dict={handle: training_handle}) 

        # Validation: re-initialize so every validation input is evaluated
        # exactly once this epoch.
        validation_predictions = [] 
        sess.run(validation_iterator.initializer) 
        while True: 
            try: 
                batch_preds = sess.run(train_op, feed_dict={handle: validation_handle}) 
                validation_predictions = np.append(validation_predictions, batch_preds) 
            except tf.errors.OutOfRangeError: 
                assert len(validation_predictions) == VALIDATION_SIZE 
                print('Epoch %d finished with accuracy: %f'
                      % (epoch, np.mean(validation_predictions))) 
                break 
사용자 정의 에포크 크기의 경우:
# Dataset construction: here both sets are finite, so both iterators are
# re-initialized every epoch.
training_dataset = build_train_dataset() 
validation_dataset = build_test_dataset() 

# A string handle lets us choose which dataset feeds the shared iterator
# simply by passing a value in feed_dict.
handle = tf.placeholder(tf.string, shape=[]) 
iterator = tf.data.Iterator.from_string_handle(
    handle, training_dataset.output_types, training_dataset.output_shapes) 
next_element = iterator.get_next() 

train_op = construct_train_op(next_element) 

training_iterator = training_dataset.make_initializable_iterator() 
validation_iterator = validation_dataset.make_initializable_iterator() 

with tf.Session() as sess: 
    training_handle = sess.run(training_iterator.string_handle()) 
    validation_handle = sess.run(validation_iterator.string_handle()) 

    for epoch in range(MAX_EPOCHS): 
        # Train: one full pass over the (re-initialized) training set,
        # terminated by OutOfRangeError.
        sess.run(training_iterator.initializer) 
        total_in_train = 0 
        while True: 
            try: 
                train_output = sess.run(train_op, feed_dict={handle: training_handle}) 
                total_in_train += len(train_output) 
            except tf.errors.OutOfRangeError: 
                assert total_in_train == TRAIN_SIZE 
                break  # we are done with the epoch
        
        # Validation: re-initialize so every validation input is evaluated
        # exactly once this epoch.
        validation_predictions = [] 
        sess.run(validation_iterator.initializer) 
        while True: 
            try: 
                batch_preds = sess.run(train_op, feed_dict={handle: validation_handle}) 
                validation_predictions = np.append(validation_predictions, batch_preds) 
            except tf.errors.OutOfRangeError: 
                assert len(validation_predictions) == VALIDATION_SIZE 
                print('Epoch %d finished with accuracy: %f'
                      % (epoch, np.mean(validation_predictions))) 
                break 

2)