gen_12
parent 33d1945de2
commit 5895f39874
@@ -42,9 +42,9 @@ def test_interpol():
 
 
 if __name__ == "__main__":
-    # labels = LabelConverter(["foam_PDMS_white", "foam_PDMS_black", "foam_PDMS_TX100", "foam_PE", "antistatic_foil", "cardboard", "glass", "kapton", "bubble_wrap_PE", "fabric_PP", ])
-    labels = LabelConverter(["foam_PDMS_white", "foam_PDMS_black", "foam_PDMS_TX100", "foam_PE", "kapton", "bubble_wrap_PE", "fabric_PP", ])
-    models_dir = "/home/matth/Uni/TENG/teng_2/models_gen_8" # where to save models, settings and results
+    labels = LabelConverter(["foam_PDMS_white", "foam_PDMS_black", "foam_PDMS_TX100", "foam_PE", "antistatic_foil", "cardboard", "glass", "kapton", "bubble_wrap_PE", "fabric_PP", ])
+    # labels = LabelConverter(["foam_PDMS_white", "foam_PDMS_black", "foam_PDMS_TX100", "foam_PE", "kapton", "bubble_wrap_PE", "fabric_PP", ])
+    models_dir = "/home/matth/Uni/TENG/teng_2/models_gen_12" # where to save models, settings and results
     if not path.isdir(models_dir):
         makedirs(models_dir)
     data_dir = "/home/matth/Uni/TENG/teng_2/sorted_data"
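Note: LabelConverter is defined elsewhere in the repository and is not part of this diff. As a rough, assumed sketch of what such a helper usually provides (a mapping between the material label strings above and integer class indices for the classifier), it might look like the following; the class and method names are illustrative, not the repo's actual API:

# Hypothetical sketch only -- not the repository's LabelConverter implementation.
class LabelConverterSketch:
    def __init__(self, labels):
        self.labels = list(labels)                          # e.g. "kapton", "glass", ...
        self.index = {name: i for i, name in enumerate(self.labels)}

    def __len__(self):
        return len(self.labels)                             # number of classes

    def to_index(self, name):
        return self.index[name]                             # label string -> class index

    def to_label(self, i):
        return self.labels[i]                               # class index -> label string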
@@ -53,17 +53,17 @@ if __name__ == "__main__":
     # gen_6 best options: no glass, cardboard and antistatic_foil, not bidirectional, lr=0.0007, no datasplitter, 2 layers n_hidden = 10
 
     # Test with
-    num_layers = [ 2 ]
-    hidden_size = [ 7, 11, 14 ]
+    num_layers = [ 2, 3 ]
+    hidden_size = [ 21, 28 ]
     bidirectional = [ False, True ]
     t_const_int = ConstantInterval(0.01) # TODO check if needed: data was taken at equal rate, but it isnt perfect -> maybe just ignore?
     t_norm = Normalize(-1, 1)
-    transforms = [[ ], [ t_norm ]] #, [ t_norm, t_const_int ]]
+    transforms = [[ t_norm ]] #, [ t_norm, t_const_int ]]
     batch_sizes = [ 4 ]
-    splitters = [ DataSplitter(50, drop_if_smaller_than=30), DataSplitter(100, drop_if_smaller_than=30) ] # smallest file has length 68 TODO: try with 0.5-1second snippets
-    num_epochs = [ 5 ]
+    splitters = [ DataSplitter(50, drop_if_smaller_than=30) ] # smallest file has length 68 TODO: try with 0.5-1second snippets
+    num_epochs = [ 80 ]
     # (epoch, min_accuracy)
-    training_cancel_points = [(10, 10), (20, 20), (40, 30)]
+    training_cancel_points = [(15, 20), (40, 25)]
     # training_cancel_points = []
 
     args = [num_layers, hidden_size, bidirectional, [None], [None], [None], transforms, splitters, num_epochs, batch_sizes]
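Note: the args list above feeds a hyperparameter grid; a later hunk indexes the resulting settings objects and reads st.transforms, st.splitter, st.num_epochs and st.batch_size. A minimal sketch of how such a grid could be expanded, assuming a simple namedtuple container (the field names are taken from the attributes visible in this diff, and the three [None] slots are kept as placeholders); the real Settings class is not shown here:

# Sketch under assumptions -- not the repository's actual settings container.
from collections import namedtuple
from itertools import product

SettingsSketch = namedtuple("SettingsSketch", [
    "num_layers", "hidden_size", "bidirectional",
    "slot_a", "slot_b", "slot_c",                 # the three [None] placeholders
    "transforms", "splitter", "num_epochs", "batch_size",
])

def expand_grid(args):
    # Cartesian product over all option lists -> one settings object per combination
    return [SettingsSketch(*combo) for combo in product(*args)]

# With the gen_12 lists above this yields 2 * 2 * 2 = 8 combinations
# (num_layers x hidden_size x bidirectional; every other list has a single entry).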
@@ -75,15 +75,13 @@ if __name__ == "__main__":
 
     loss_func = nn.CrossEntropyLoss()
     optimizers = [
-        lambda model: torch.optim.Adam(model.parameters(), lr=0.0005),
         lambda model: torch.optim.Adam(model.parameters(), lr=0.0007),
-        # lambda model: torch.optim.Adam(model.parameters(), lr=0.008),
     ]
     schedulers = [
         None,
         # lambda optimizer, st: torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9),
         # lambda optimizer, st: torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.5),
-        lambda optimizer, st: torch.optim.lr_scheduler.StepLR(optimizer, step_size=st.num_epochs // 8, gamma=0.50, verbose=False),
+        lambda optimizer, st: torch.optim.lr_scheduler.StepLR(optimizer, step_size=st.num_epochs // 8, gamma=0.60, verbose=False),
         # lambda optimizer, st: torch.optim.lr_scheduler.StepLR(optimizer, step_size=st.num_epochs // 10, gamma=0.75, verbose=False),
     ]
 
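Note: with the new num_epochs = 80, the retained StepLR schedule uses step_size = 80 // 8 = 10 and gamma = 0.60, i.e. the learning rate is multiplied by 0.6 every 10 epochs. A quick stand-alone check, assuming the 0.0007 Adam base rate kept in the optimizers list above:

# Stand-alone check of the StepLR decay selected above (plain Python, no torch needed):
# StepLR gives lr(epoch) = base_lr * gamma ** (epoch // step_size) when stepped once per epoch.
base_lr = 0.0007            # Adam learning rate kept in the optimizers list
gamma = 0.60
step_size = 80 // 8         # num_epochs // 8 = 10

for epoch in (0, 10, 20, 40, 79):
    lr = base_lr * gamma ** (epoch // step_size)
    print(f"epoch {epoch:2d}: lr = {lr:.6f}")
# epoch  0: lr = 0.000700
# epoch 10: lr = 0.000420
# epoch 20: lr = 0.000252
# epoch 40: lr = 0.000091
# epoch 79: lr = 0.000020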
@@ -105,7 +103,7 @@ if __name__ == "__main__":
     for s in range(len(schedulers)):
         for i in range(len(settings)):
             st = settings[i]
-            train_set, test_set = load_datasets(data_dir, labels, exclude_n_object=None, voltage=None, transforms=st.transforms, split_function=st.splitter, train_to_test_ratio=0.7, random_state=80, num_workers=4)
+            train_set, test_set = load_datasets(data_dir, labels, exclude_n_object=None, voltage=None, transforms=st.transforms, split_function=st.splitter, train_to_test_ratio=0.7, random_state=123, num_workers=4)
 
             generator = torch.manual_seed(42)
             train_loader = DataLoader(train_set, batch_size=st.batch_size, shuffle=True, generator=generator, collate_fn=PadSequences())
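Note: PadSequences is the project's own collate function and is not shown in this diff. As a minimal sketch of what such a collate_fn typically does, assuming each dataset item is a (sequence tensor, label) pair: zero-pad the variable-length snippets so they stack into one batch tensor. Whether the real version also returns the original lengths (e.g. for packing) is an assumption.

# Hypothetical stand-in for the repo's PadSequences collate_fn.
import torch
from torch.nn.utils.rnn import pad_sequence

class PadSequencesSketch:
    def __call__(self, batch):
        # batch: list of (sequence_tensor, label) pairs with varying sequence lengths
        sequences, labels = zip(*batch)
        lengths = torch.tensor([len(s) for s in sequences])
        padded = pad_sequence(sequences, batch_first=True, padding_value=0.0)
        return padded, torch.tensor(labels), lengths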