-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy path: train.py
More file actions
55 lines (45 loc) · 1.57 KB
/
train.py
File metadata and controls
55 lines (45 loc) · 1.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import os
import librosa
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
# -------------- Dataset --------------
class AudioDataset(Dataset):
    """Dataset of raw audio waveforms loaded from a flat folder.

    Each item is a 1-D float tensor of exactly ``segment_length`` samples
    (padded with zeros or truncated), so that the default DataLoader
    collation can stack items of originally different durations.
    """

    def __init__(self, folder_path, segment_length=44100):
        self.folder_path = folder_path
        self.segment_length = segment_length
        # Sort for a deterministic ordering: os.listdir order is
        # filesystem-dependent and varies between runs/machines.
        self.file_list = sorted(os.listdir(folder_path))

    def __len__(self):
        return len(self.file_list)

    @staticmethod
    def _fix_length(audio, length):
        """Zero-pad or truncate a 1-D tensor to exactly ``length`` samples."""
        if audio.numel() >= length:
            return audio[:length]
        return torch.nn.functional.pad(audio, (0, length - audio.numel()))

    def __getitem__(self, idx):
        audio_path = os.path.join(self.folder_path, self.file_list[idx])
        # Resample everything to 44.1 kHz; librosa returns mono float32.
        audio, sr = librosa.load(audio_path, sr=44100)
        audio = torch.tensor(audio).float()
        # Uniform length is required for batching (see class docstring).
        return self._fix_length(audio, self.segment_length)
# -------------- Simple Model --------------
class SimpleSeparator(nn.Module):
    """Baseline separator: one fully connected layer mapping a waveform
    segment to an equal-length output.

    NOTE: the default segment_length=44100 creates a Linear with
    ~1.9e9 weights (~7.8 GB in float32) — pass a smaller value for
    experimentation. The parameter defaults to the original hard-coded
    size, so existing callers and saved state_dicts are unaffected.
    """

    def __init__(self, segment_length=44100):
        super().__init__()
        # Same attribute name as before so state_dict keys stay 'fc.*'.
        self.fc = nn.Linear(segment_length, segment_length)

    def forward(self, x):
        return self.fc(x)
# -------------- Training --------------
def train(data_dir='dataset/train', epochs=5, batch_size=2, lr=0.001):
    """Train SimpleSeparator to reconstruct its input (identity target).

    Args:
        data_dir: folder of audio files consumed by AudioDataset.
        epochs: number of passes over the dataset.
        batch_size: DataLoader batch size.
        lr: Adam learning rate.

    Side effects: prints the average loss per epoch and saves the model
    weights to 'model.pth' in the current directory.
    """
    dataset = AudioDataset(data_dir)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    model = SimpleSeparator()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    model.train()
    for epoch in range(epochs):
        total_loss = 0.0
        num_batches = 0
        for batch in dataloader:
            optimizer.zero_grad()
            output = model(batch)
            # Target == input: the loss previously read after the loop
            # would raise UnboundLocalError on an empty dataset and
            # reported only the last batch; we average instead.
            loss = criterion(output, batch)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            num_batches += 1
        avg_loss = total_loss / max(num_batches, 1)
        print(f"Epoch {epoch+1}, Loss: {avg_loss}")
    # Save the model
    torch.save(model.state_dict(), 'model.pth')
    print("Model saved as model.pth!")
# Script entry point: run a training session when executed directly.
if __name__ == "__main__":
train()