Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
P
pseudo_image
Manage
Activity
Members
Labels
Plan
Issues
0
Issue boards
Milestones
Wiki
Code
Merge requests
0
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package Registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Léo Schneider
pseudo_image
Commits
1f48e9b1
Commit
1f48e9b1
authored
1 month ago
by
Schneider Leo
Browse files
Options
Downloads
Patches
Plain Diff
add : manual sweep
parent
ba0ac47c
No related branches found
No related tags found
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
image_ref/main_sweep.py
+19
-13
19 additions, 13 deletions
image_ref/main_sweep.py
image_ref/sweep_train.py
+15
-17
15 additions, 17 deletions
image_ref/sweep_train.py
with
34 additions
and
30 deletions
image_ref/main_sweep.py
+
19
−
13
View file @
1f48e9b1
import
time
import
random
import
numpy
as
np
import
wandb
as
wdb
from
sweep_train
import
run_duo
if
__name__
==
'
__main__
'
:
if
__name__
==
'
__main__
'
:
sweep_configuration
=
{
sweep_configuration
=
{
...
@@ -10,7 +10,7 @@ if __name__ == '__main__':
...
@@ -10,7 +10,7 @@ if __name__ == '__main__':
"
parameters
"
:
{
"
parameters
"
:
{
"
epoches
"
:{
"
value
"
:
50
},
"
epoches
"
:{
"
value
"
:
50
},
"
eval_inter
"
:{
"
value
"
:
1
},
"
eval_inter
"
:{
"
value
"
:
1
},
"
noise_threshold
"
:
{
"
distribution
"
:
"
log_uniform_values
"
,
"
max
"
:
10000.
,
"
min
"
:
0.0001
},
"
noise_threshold
"
:
{
"
distribution
"
:
"
log_uniform_values
"
,
"
max
"
:
10000.
,
"
min
"
:
1
},
"
lr
"
:
{
"
distribution
"
:
"
log_uniform_values
"
,
"
max
"
:
0.01
,
"
min
"
:
0.0001
},
"
lr
"
:
{
"
distribution
"
:
"
log_uniform_values
"
,
"
max
"
:
0.01
,
"
min
"
:
0.0001
},
"
batch_size
"
:
{
"
value
"
:
64
},
"
batch_size
"
:
{
"
value
"
:
64
},
"
positive_prop
"
:
{
"
distribution
"
:
"
uniform
"
,
"
max
"
:
95.
,
"
min
"
:
5.
},
"
positive_prop
"
:
{
"
distribution
"
:
"
uniform
"
,
"
max
"
:
95.
,
"
min
"
:
5.
},
...
@@ -21,15 +21,21 @@ if __name__ == '__main__':
...
@@ -21,15 +21,21 @@ if __name__ == '__main__':
"
dataset_val_dir
"
:
{
"
value
"
:
"
data/processed_data_wiff/npy_image/test_data
"
},
"
dataset_val_dir
"
:
{
"
value
"
:
"
data/processed_data_wiff/npy_image/test_data
"
},
"
dataset_ref_dir
"
:
{
"
values
"
:
[
"
image_ref/img_ref
"
,
"
image_ref/img_ref_count_th_10
"
,
"
image_ref/img_ref_count_th_5
"
]},
"
dataset_ref_dir
"
:
{
"
values
"
:
[
"
image_ref/img_ref
"
,
"
image_ref/img_ref_count_th_10
"
,
"
image_ref/img_ref_count_th_5
"
]},
},
},
"
controller
"
:{
"
max_iter
"
:
10
,
"
type
"
:
"
local
"
},
}
}
sweep_id
=
wdb
.
sweep
(
sweep_configuration
)
for
i
in
range
(
sweep_configuration
[
"
max_iter
"
]):
run_config
=
{}
for
p
,
v
in
sweep_configuration
[
"
parameters
"
].
items
()
:
# Start the local controller
if
"
value
"
in
v
:
sweep
=
wdb
.
controller
(
sweep_id
)
run_config
[
p
]
=
v
[
"
value
"
]
while
not
sweep
.
done
():
elif
"
values
"
in
v
:
sweep
.
print_status
()
run_config
[
p
]
=
random
.
choice
(
v
[
"
values
"
])
sweep
.
step
()
elif
"
distribution
"
in
v
:
time
.
sleep
(
5
)
if
v
[
"
distribution
"
]
==
"
uniform
"
:
run_config
[
p
]
=
random
.
uniform
(
v
[
"
min
"
],
v
[
"
max
"
])
elif
v
[
"
distribution
"
]
==
"
log_uniform_values
"
:
run_config
[
p
]
=
np
.
exp
(
random
.
uniform
(
np
.
log
(
v
[
"
min
"
]),
np
.
log
(
v
[
"
max
"
])))
print
(
'
Launching run
'
)
run_duo
(
run_config
)
This diff is collapsed.
Click to expand it.
image_ref/sweep_train.py
+
15
−
17
View file @
1f48e9b1
...
@@ -6,7 +6,7 @@ import torch.nn as nn
...
@@ -6,7 +6,7 @@ import torch.nn as nn
from
model
import
Classification_model_duo_contrastive
from
model
import
Classification_model_duo_contrastive
import
torch.optim
as
optim
import
torch.optim
as
optim
def
train_duo
(
model
,
data_train
,
optimizer
,
loss_function
,
epoch
,
wandb
):
def
train_duo
(
model
,
data_train
,
optimizer
,
loss_function
,
epoch
):
model
.
train
()
model
.
train
()
losses
=
0.
losses
=
0.
acc
=
0.
acc
=
0.
...
@@ -40,7 +40,7 @@ def train_duo(model, data_train, optimizer, loss_function, epoch, wandb):
...
@@ -40,7 +40,7 @@ def train_duo(model, data_train, optimizer, loss_function, epoch, wandb):
return
losses
,
acc
return
losses
,
acc
def
val_duo
(
model
,
data_test
,
loss_function
,
epoch
,
wandb
):
def
val_duo
(
model
,
data_test
,
loss_function
,
epoch
):
model
.
eval
()
model
.
eval
()
losses
=
0.
losses
=
0.
acc
=
0.
acc
=
0.
...
@@ -94,15 +94,15 @@ def run_duo(args):
...
@@ -94,15 +94,15 @@ def run_duo(args):
print
(
'
Wandb initialised
'
)
print
(
'
Wandb initialised
'
)
# load data
# load data
data_train
,
data_val_batch
,
data_test_batch
=
load_data_duo
(
base_dir_train
=
args
.
dataset_train_dir
,
data_train
,
data_val_batch
,
data_test_batch
=
load_data_duo
(
base_dir_train
=
args
[
'
dataset_train_dir
'
]
,
base_dir_val
=
args
.
dataset_val_dir
,
base_dir_val
=
args
[
'
dataset_val_dir
'
]
,
base_dir_test
=
None
,
base_dir_test
=
None
,
batch_size
=
args
.
batch_size
,
batch_size
=
args
[
'
batch_size
'
]
,
ref_dir
=
args
.
dataset_ref_dir
,
ref_dir
=
args
[
'
dataset_ref_dir
'
]
,
positive_prop
=
args
.
positive_prop
,
sampler
=
args
.
sampler
)
positive_prop
=
args
[
'
positive_prop
'
]
,
sampler
=
args
[
'
sampler
'
]
)
# load model
# load model
model
=
Classification_model_duo_contrastive
(
model
=
args
.
model
,
n_class
=
2
)
model
=
Classification_model_duo_contrastive
(
model
=
args
[
'
model
'
]
,
n_class
=
2
)
model
.
float
()
model
.
float
()
# move parameters to GPU
# move parameters to GPU
if
torch
.
cuda
.
is_available
():
if
torch
.
cuda
.
is_available
():
...
@@ -118,15 +118,15 @@ def run_duo(args):
...
@@ -118,15 +118,15 @@ def run_duo(args):
val_loss
=
[]
val_loss
=
[]
# init training
# init training
loss_function
=
nn
.
CrossEntropyLoss
()
loss_function
=
nn
.
CrossEntropyLoss
()
if
args
.
opti
==
'
adam
'
:
if
args
[
'
opti
'
]
==
'
adam
'
:
optimizer
=
optim
.
Adam
(
model
.
parameters
(),
lr
=
args
.
lr
)
optimizer
=
optim
.
Adam
(
model
.
parameters
(),
lr
=
args
[
'
lr
'
]
)
# train model
# train model
for
e
in
range
(
args
.
epoches
):
for
e
in
range
(
args
[
'
epoches
'
]
):
loss
,
acc
=
train_duo
(
model
,
data_train
,
optimizer
,
loss_function
,
e
,
args
.
wandb
)
loss
,
acc
=
train_duo
(
model
,
data_train
,
optimizer
,
loss_function
,
e
)
train_loss
.
append
(
loss
)
train_loss
.
append
(
loss
)
train_acc
.
append
(
acc
)
train_acc
.
append
(
acc
)
if
e
%
args
.
eval_inter
==
0
:
if
e
%
args
[
'
eval_inter
'
]
==
0
:
loss
,
acc
,
acc_contrastive
=
val_duo
(
model
,
data_val_batch
,
loss_function
,
e
,
args
.
wandb
)
loss
,
acc
,
acc_contrastive
=
val_duo
(
model
,
data_val_batch
,
loss_function
,
e
)
val_loss
.
append
(
loss
)
val_loss
.
append
(
loss
)
val_acc
.
append
(
acc
)
val_acc
.
append
(
acc
)
val_cont_acc
.
append
(
acc_contrastive
)
val_cont_acc
.
append
(
acc_contrastive
)
...
@@ -134,6 +134,4 @@ def run_duo(args):
...
@@ -134,6 +134,4 @@ def run_duo(args):
if
__name__
==
'
__main__
'
:
if
__name__
==
'
__main__
'
:
config
=
wdb
.
config
pass
print
(
config
)
\ No newline at end of file
run_duo
(
config
)
\ No newline at end of file
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment