research/active_learning/archive/good_run.py [154:214]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_eval_classifier(clf_model, unlabeled_dataset, model, pth, epochs=15):
    """Train a classifier head on embeddings of user-labeled detections, then
    validate it on the unlabeled pool.

    Assumes `args` is available at module scope; `pth` is accepted but unused here.
    """
    trainset_query = (Detection
                      .select(Detection.id, Oracle.label)
                      .join(Oracle)
                      .where(Detection.kind == DetectionKind.UserDetection.value))
    train_dataset = SQLDataLoader(trainset_query, os.path.join(args.run_data, 'crops'), is_training=True)
    train_dataset.updateEmbedding(model)  # refresh cached embeddings with the current model
    train_dataset.embedding_mode()        # serve embedding vectors rather than raw crops
    train_dataset.train()
    clf_criterion = nn.CrossEntropyLoss()
    clf_optimizer = torch.optim.Adam(clf_model.parameters(), lr=0.001, weight_decay=0.0005)
    clf_e = Engine(clf_model, clf_criterion, clf_optimizer, verbose=True, print_freq=1)

    clf_model.train()
    clf_train_loader = train_dataset.getSingleLoader(batch_size=64)
    for i in range(epochs):
        clf_e.train_one_epoch(clf_train_loader, i, True)

    # Score the unlabeled pool with the freshly trained classifier.
    clf_model.eval()
    unlabeled_dataset.embedding_mode()
    unlabeled_dataset.eval()
    eval_loader = unlabeled_dataset.getSingleLoader(batch_size=1024)
    clf_e.validate(eval_loader, True)

def custom_policy(step):
    """Piecewise-constant LR schedule: the first half of `details` holds the
    boundary steps, the second half the rates (one extra rate for the tail)."""
    details_str = '41, 56, 66, 0.001, 0.0005, 0.0001, 0.00005'
    details = [float(x) for x in details_str.split(',')]
    n_bounds = (len(details) - 1) // 2
    boundaries, rates = details[:n_bounds], details[n_bounds:]
    for boundary, rate in zip(boundaries, rates):
        if step <= int(boundary):
            return rate
    return rates[-1]

def adjust_lr(optimizer, step):
    """Set every parameter group's learning rate according to custom_policy."""
    lr = custom_policy(step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def finetune_embedding(model, train_dataset, P, K, epochs):
    """Fine-tune the embedding net with an online triplet loss on balanced
    batches of P classes x K samples each."""
    train_dataset.image_mode()  # triplet training needs raw crops, not cached embeddings
    train_loader = train_dataset.getBalancedLoader(P=P, K=K)
    criterion = OnlineTripletLoss(1, RandomNegativeTripletSelector(1))  # margin = 1
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # weight_decay=0.0005 disabled
    e = Engine(model, criterion, optimizer, verbose=True, print_freq=10)
    for epoch in range(epochs):
        e.train_one_epoch(train_loader, epoch, False)


def main():
    args = parser.parse_args()

    # Open the SQLite database named after the run directory.
    print("DB Connect")
    db_path = os.path.join(args.run_data, os.path.basename(args.run_data)) + ".db"
    print(db_path)
    db = SqliteDatabase(db_path)
    proxy.initialize(db)
    db.connect()
    print("connected")
    print("CompleteLoop")

    # Restore the pretrained embedding network from the base checkpoint.
    checkpoint = load_checkpoint(args.base_model)
    embedding_net = EmbeddingNet(checkpoint['arch'], checkpoint['feat_dim'], False)
    #embedding_net = EmbeddingNet('resnet50', 256, True)
    model = torch.nn.DataParallel(embedding_net).cuda()
    model.load_state_dict(checkpoint['state_dict'])
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
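
Note on the shared schedule: custom_policy above packs a piecewise-constant
learning-rate schedule into one comma-separated string; the first three
entries are boundary steps (41, 56, 66) and the remaining four are rates.
A minimal standalone sketch of the same schedule, using hypothetical names
that are not part of the archived scripts:

    # Sketch only: the step schedule encoded by custom_policy, made explicit.
    BOUNDARIES = [41, 56, 66]                 # last step at which each rate applies
    RATES = [0.001, 0.0005, 0.0001, 0.00005]  # one extra rate for steps past the last boundary

    def step_lr(step):
        for boundary, rate in zip(BOUNDARIES, RATES):
            if step <= boundary:
                return rate
        return RATES[-1]

    assert step_lr(10) == 0.001    # steps 0..41
    assert step_lr(50) == 0.0005   # steps 42..56
    assert step_lr(60) == 0.0001   # steps 57..66
    assert step_lr(70) == 0.00005  # steps 67 and beyond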



research/active_learning/archive/run_bk.py [133:193]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_eval_classifier(clf_model, unlabeled_dataset, model, pth, epochs=15):
    """Train a classifier head on embeddings of user-labeled detections, then
    validate it on the unlabeled pool.

    Assumes `args` is available at module scope; `pth` is accepted but unused here.
    """
    trainset_query = (Detection
                      .select(Detection.id, Oracle.label)
                      .join(Oracle)
                      .where(Detection.kind == DetectionKind.UserDetection.value))
    train_dataset = SQLDataLoader(trainset_query, os.path.join(args.run_data, 'crops'), is_training=True)
    train_dataset.updateEmbedding(model)  # refresh cached embeddings with the current model
    train_dataset.embedding_mode()        # serve embedding vectors rather than raw crops
    train_dataset.train()
    clf_criterion = nn.CrossEntropyLoss()
    clf_optimizer = torch.optim.Adam(clf_model.parameters(), lr=0.001, weight_decay=0.0005)
    clf_e = Engine(clf_model, clf_criterion, clf_optimizer, verbose=True, print_freq=1)

    clf_model.train()
    clf_train_loader = train_dataset.getSingleLoader(batch_size=64)
    for i in range(epochs):
        clf_e.train_one_epoch(clf_train_loader, i, True)

    # Score the unlabeled pool with the freshly trained classifier.
    clf_model.eval()
    unlabeled_dataset.embedding_mode()
    unlabeled_dataset.eval()
    eval_loader = unlabeled_dataset.getSingleLoader(batch_size=1024)
    clf_e.validate(eval_loader, True)

def custom_policy(step):
    """Piecewise-constant LR schedule: the first half of `details` holds the
    boundary steps, the second half the rates (one extra rate for the tail)."""
    details_str = '41, 56, 66, 0.001, 0.0005, 0.0001, 0.00005'
    details = [float(x) for x in details_str.split(',')]
    n_bounds = (len(details) - 1) // 2
    boundaries, rates = details[:n_bounds], details[n_bounds:]
    for boundary, rate in zip(boundaries, rates):
        if step <= int(boundary):
            return rate
    return rates[-1]

def adjust_lr(optimizer, step):
    """Set every parameter group's learning rate according to custom_policy."""
    lr = custom_policy(step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def finetune_embedding(model, train_dataset, P, K, epochs):
    """Fine-tune the embedding net with an online triplet loss on balanced
    batches of P classes x K samples each."""
    train_dataset.image_mode()  # triplet training needs raw crops, not cached embeddings
    train_loader = train_dataset.getBalancedLoader(P=P, K=K)
    criterion = OnlineTripletLoss(1, RandomNegativeTripletSelector(1))  # margin = 1
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # weight_decay=0.0005 disabled
    e = Engine(model, criterion, optimizer, verbose=True, print_freq=10)
    for epoch in range(epochs):
        e.train_one_epoch(train_loader, epoch, False)


def main():
    args = parser.parse_args()

    # Open the SQLite database named after the run directory.
    print("DB Connect")
    db_path = os.path.join(args.run_data, os.path.basename(args.run_data)) + ".db"
    print(db_path)
    db = SqliteDatabase(db_path)
    proxy.initialize(db)
    db.connect()
    print("connected")
    print("CompleteLoop")

    # Restore the pretrained embedding network from the base checkpoint.
    checkpoint = load_checkpoint(args.base_model)
    embedding_net = EmbeddingNet(checkpoint['arch'], checkpoint['feat_dim'], False)
    #embedding_net = EmbeddingNet('resnet50', 256, True)
    model = torch.nn.DataParallel(embedding_net).cuda()
    model.load_state_dict(checkpoint['state_dict'])
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
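
Note on the triplet objective: finetune_embedding in both files uses the
project's OnlineTripletLoss with a RandomNegativeTripletSelector and a margin
of 1, mining triplets online inside each balanced batch of P classes x K
crops. A minimal sketch of the underlying objective using PyTorch's built-in
triplet loss, with random tensors standing in for real embeddings:

    # Sketch only: plain triplet margin loss with the same margin of 1.
    # The archived scripts instead mine negatives within each P*K batch.
    import torch
    import torch.nn as nn

    triplet = nn.TripletMarginLoss(margin=1.0)

    anchor = torch.randn(32, 256, requires_grad=True)  # feat_dim of 256 is an assumption
    positive = torch.randn(32, 256)                    # same class as anchor
    negative = torch.randn(32, 256)                    # different class
    loss = triplet(anchor, positive, negative)         # mean over the batch
    loss.backward()                                    # gradients flow back to `anchor`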



