in 基础教程/A2-神经网络基本原理/第6步 - 模型部署/src/ch13-ModelInference/HelperClass2/NeuralNet_3_0.py [0:0]
def train(self, dataReader, checkpoint, need_test):
    """Run the mini-batch SGD training loop.

    Parameters
    ----------
    dataReader : object
        Data source; must provide ``num_train``, ``Shuffle()`` and
        ``GetBatchTrainSamples(batch_size, iteration)``.
        (project type — exact contract defined elsewhere; confirm against caller)
    checkpoint : float
        Fraction of an epoch between loss/error checks, e.g. 0.1 checks
        roughly 10 times per epoch.
    need_test : bool
        When True, evaluate on the test set after training and print accuracy.

    Side effects: creates ``self.loss_trace`` and ``self.loss_func``,
    updates the network weights in place, saves the result to disk via
    ``self.SaveResult()``, and prints timing/accuracy information.
    """
    t0 = time.time()
    # history and loss-function objects consumed by CheckErrorAndLoss
    self.loss_trace = TrainingHistory_2_3()
    self.loss_func = LossFunction_1_1(self.hp.net_type)
    # batch_size == -1 is the full-batch convention: use the whole training set
    if self.hp.batch_size == -1:
        self.hp.batch_size = dataReader.num_train
    max_iteration = math.ceil(dataReader.num_train / self.hp.batch_size)
    # Guard with max(1, ...): for small max_iteration * checkpoint the cast
    # would yield 0 and the modulo below would raise ZeroDivisionError.
    checkpoint_iteration = max(1, int(max_iteration * checkpoint))
    need_stop = False
    for epoch in range(self.hp.max_epoch):
        dataReader.Shuffle()
        for iteration in range(max_iteration):
            # fetch one mini-batch of (x, y) samples
            batch_x, batch_y = dataReader.GetBatchTrainSamples(self.hp.batch_size, iteration)
            # forward pass: compute network output from x
            self.forward(batch_x)
            # backward pass: gradients of w and b
            self.backward(batch_x, batch_y)
            # apply the gradient update to w and b
            self.update()
            total_iteration = epoch * max_iteration + iteration
            # periodic loss/error check; CheckErrorAndLoss decides early stop
            if (total_iteration + 1) % checkpoint_iteration == 0:
                need_stop = self.CheckErrorAndLoss(dataReader, batch_x, batch_y, epoch, total_iteration)
                if need_stop:
                    break
        if need_stop:
            break
    self.SaveResult()
    t1 = time.time()
    print("time used:", t1 - t0)
    if need_test:
        print("testing...")
        accuracy = self.Test(dataReader)
        print(accuracy)