in #U57fa#U7840#U6559#U7a0b/A2-#U795e#U7ecf#U7f51#U7edc#U57fa#U672c#U539f#U7406/#U7b2c5#U6b65 - #U975e#U7ebf#U6027#U5206#U7c7b/src/ch10-NonLinearBinaryClassification/HelperClass2/NeuralNet_2_1.py [0:0]
def train(self, dataReader, checkpoint, need_test):
    """Run mini-batch gradient descent over the full training set.

    Args:
        dataReader: data source exposing num_train, Shuffle(), and
            GetBatchTrainSamples(batch_size, iteration); presumably the
            project's DataReader class — confirm against callers.
        checkpoint: fraction of an epoch between loss/error checks
            (e.g. 0.1 -> roughly 10 checks per epoch).
        need_test: when truthy, run self.Test on dataReader after
            training and print the accuracy.

    Side effects: creates self.loss_trace and self.loss_func, mutates
    self.hp.batch_size when it is -1, updates weights via self.update(),
    and saves the trained result via self.SaveResult().
    """
    # history recorder and loss function used by CheckErrorAndLoss
    self.loss_trace = TrainingHistory_2_1()
    self.loss_func = LossFunction_1_1(self.hp.net_type)
    # batch_size == -1 is the sentinel for full-batch gradient descent
    if self.hp.batch_size == -1:
        self.hp.batch_size = dataReader.num_train
    # iterations (mini-batches) per epoch; ceil so a partial last batch counts
    max_iteration = math.ceil(dataReader.num_train / self.hp.batch_size)
    # Guard against truncation to 0: a small checkpoint fraction (or a
    # tiny dataset with a large batch) would otherwise make the modulo
    # below raise ZeroDivisionError. At least one check per iteration.
    checkpoint_iteration = max(1, int(max_iteration * checkpoint))
    need_stop = False
    for epoch in range(self.hp.max_epoch):
        dataReader.Shuffle()
        for iteration in range(max_iteration):
            # fetch one mini-batch of (x, y)
            batch_x, batch_y = dataReader.GetBatchTrainSamples(self.hp.batch_size, iteration)
            # forward pass: network output a for input x
            batch_a = self.forward(batch_x)
            # backward pass: gradients of w and b
            self.backward(batch_x, batch_y, batch_a)
            # apply gradient step to w, b
            self.update()
            # periodic loss/error check; CheckErrorAndLoss decides
            # whether the stop condition has been reached
            total_iteration = epoch * max_iteration + iteration
            if (total_iteration + 1) % checkpoint_iteration == 0:
                need_stop = self.CheckErrorAndLoss(dataReader, batch_x, batch_y, epoch, total_iteration)
                if need_stop:
                    break
        # end for iteration
        if need_stop:
            break
    # end for epoch
    self.SaveResult()
    if need_test:
        print("testing...")
        accuracy = self.Test(dataReader)
        print(accuracy)