Skip to content
Snippets Groups Projects
Commit 1f48e9b1 authored by Schneider Leo's avatar Schneider Leo
Browse files

add: manual sweep

parent ba0ac47c
No related branches found
No related tags found
No related merge requests found
import time import random
import numpy as np
import wandb as wdb from sweep_train import run_duo
if __name__ == '__main__': if __name__ == '__main__':
sweep_configuration = { sweep_configuration = {
...@@ -10,7 +10,7 @@ if __name__ == '__main__': ...@@ -10,7 +10,7 @@ if __name__ == '__main__':
"parameters": { "parameters": {
"epoches":{"value": 50}, "epoches":{"value": 50},
"eval_inter":{"value": 1}, "eval_inter":{"value": 1},
"noise_threshold": {"distribution" : "log_uniform_values", "max": 10000., "min": 0.0001}, "noise_threshold": {"distribution" : "log_uniform_values", "max": 10000., "min": 1},
"lr": {"distribution" : "log_uniform_values", "max": 0.01, "min": 0.0001}, "lr": {"distribution" : "log_uniform_values", "max": 0.01, "min": 0.0001},
"batch_size": {"value": 64}, "batch_size": {"value": 64},
"positive_prop": {"distribution" : "uniform","max": 95., "min": 5.}, "positive_prop": {"distribution" : "uniform","max": 95., "min": 5.},
...@@ -21,15 +21,21 @@ if __name__ == '__main__': ...@@ -21,15 +21,21 @@ if __name__ == '__main__':
"dataset_val_dir": {"value": "data/processed_data_wiff/npy_image/test_data"}, "dataset_val_dir": {"value": "data/processed_data_wiff/npy_image/test_data"},
"dataset_ref_dir": {"values": ["image_ref/img_ref","image_ref/img_ref_count_th_10","image_ref/img_ref_count_th_5"]}, "dataset_ref_dir": {"values": ["image_ref/img_ref","image_ref/img_ref_count_th_10","image_ref/img_ref_count_th_5"]},
}, },
"controller":{ "max_iter": 10
"type": "local"},
} }
def sample_run_config(parameters):
    """Draw one concrete run configuration from a manual-sweep parameter spec.

    Each entry of ``parameters`` maps a hyper-parameter name to a spec dict
    in the wandb sweep-config style:

      - ``{"value": v}``                              -> fixed value ``v``
      - ``{"values": [...]}``                         -> uniform choice from the list
      - ``{"distribution": "uniform", "min": a, "max": b}``
                                                       -> uniform float in ``[a, b]``
      - ``{"distribution": "log_uniform_values", "min": a, "max": b}``
                                                       -> log-uniform float in ``[a, b]``
                                                          (requires ``a > 0``)

    Returns a flat ``{name: sampled_value}`` dict. A spec matching none of the
    recognised keys is silently skipped (same as the original inline loop).
    Sampling uses the global ``random`` state, so it is reproducible only if
    the caller seeds ``random`` beforehand.
    """
    run_config = {}
    for name, spec in parameters.items():
        if "value" in spec:
            run_config[name] = spec["value"]
        elif "values" in spec:
            run_config[name] = random.choice(spec["values"])
        elif "distribution" in spec:
            dist = spec["distribution"]
            if dist == "uniform":
                run_config[name] = random.uniform(spec["min"], spec["max"])
            elif dist == "log_uniform_values":
                # Sample uniformly in log-space, then map back — equal
                # probability mass per decade instead of per unit.
                run_config[name] = np.exp(
                    random.uniform(np.log(spec["min"]), np.log(spec["max"]))
                )
    return run_config


# Manual sweep driver: sample "max_iter" independent configurations and
# launch one training run for each (replaces the old wandb controller loop).
if __name__ == '__main__':
    for _ in range(sweep_configuration["max_iter"]):
        run_config = sample_run_config(sweep_configuration["parameters"])
        print('Launching run')
        run_duo(run_config)
...@@ -6,7 +6,7 @@ import torch.nn as nn ...@@ -6,7 +6,7 @@ import torch.nn as nn
from model import Classification_model_duo_contrastive from model import Classification_model_duo_contrastive
import torch.optim as optim import torch.optim as optim
def train_duo(model, data_train, optimizer, loss_function, epoch, wandb): def train_duo(model, data_train, optimizer, loss_function, epoch):
model.train() model.train()
losses = 0. losses = 0.
acc = 0. acc = 0.
...@@ -40,7 +40,7 @@ def train_duo(model, data_train, optimizer, loss_function, epoch, wandb): ...@@ -40,7 +40,7 @@ def train_duo(model, data_train, optimizer, loss_function, epoch, wandb):
return losses, acc return losses, acc
def val_duo(model, data_test, loss_function, epoch, wandb): def val_duo(model, data_test, loss_function, epoch):
model.eval() model.eval()
losses = 0. losses = 0.
acc = 0. acc = 0.
...@@ -94,15 +94,15 @@ def run_duo(args): ...@@ -94,15 +94,15 @@ def run_duo(args):
print('Wandb initialised') print('Wandb initialised')
# load data # load data
data_train, data_val_batch, data_test_batch = load_data_duo(base_dir_train=args.dataset_train_dir, data_train, data_val_batch, data_test_batch = load_data_duo(base_dir_train=args['dataset_train_dir'],
base_dir_val=args.dataset_val_dir, base_dir_val=args['dataset_val_dir'],
base_dir_test=None, base_dir_test=None,
batch_size=args.batch_size, batch_size=args['batch_size'],
ref_dir=args.dataset_ref_dir, ref_dir=args['dataset_ref_dir'],
positive_prop=args.positive_prop, sampler=args.sampler) positive_prop=args['positive_prop'], sampler=args['sampler'])
# load model # load model
model = Classification_model_duo_contrastive(model=args.model, n_class=2) model = Classification_model_duo_contrastive(model=args['model'], n_class=2)
model.float() model.float()
# move parameters to GPU # move parameters to GPU
if torch.cuda.is_available(): if torch.cuda.is_available():
...@@ -118,15 +118,15 @@ def run_duo(args): ...@@ -118,15 +118,15 @@ def run_duo(args):
val_loss = [] val_loss = []
# init training # init training
loss_function = nn.CrossEntropyLoss() loss_function = nn.CrossEntropyLoss()
if args.opti == 'adam': if args['opti'] == 'adam':
optimizer = optim.Adam(model.parameters(), lr=args.lr) optimizer = optim.Adam(model.parameters(), lr=args['lr'])
# train model # train model
for e in range(args.epoches): for e in range(args['epoches']):
loss, acc = train_duo(model, data_train, optimizer, loss_function, e, args.wandb) loss, acc = train_duo(model, data_train, optimizer, loss_function, e)
train_loss.append(loss) train_loss.append(loss)
train_acc.append(acc) train_acc.append(acc)
if e % args.eval_inter == 0: if e % args['eval_inter'] == 0:
loss, acc, acc_contrastive = val_duo(model, data_val_batch, loss_function, e, args.wandb) loss, acc, acc_contrastive = val_duo(model, data_val_batch, loss_function, e)
val_loss.append(loss) val_loss.append(loss)
val_acc.append(acc) val_acc.append(acc)
val_cont_acc.append(acc_contrastive) val_cont_acc.append(acc_contrastive)
...@@ -134,6 +134,4 @@ def run_duo(args): ...@@ -134,6 +134,4 @@ def run_duo(args):
if __name__ == '__main__': if __name__ == '__main__':
config = wdb.config pass
print(config) \ No newline at end of file
run_duo(config)
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment