// Build the dataset (feature file + label file), stack individual samples
// into batched tensors, and wrap it in a sequential data loader.
auto dataset = CustomDataset(feature_file, label_file).map(torch::data::transforms::Stack<>()); auto data_loader = torch::data::make_data_loader(dataset, batch_size);
// Training setup: fresh model, plain SGD over all parameters at `learning_rate`.
Net model = Net(); torch::optim::SGD optimizer(model->parameters(), learning_rate);
// Inference setup: construct the module shell, restore serialized weights from
// `model_path`, and put the module in eval mode.
// NOTE(review): `model` is declared twice in this chunk — the lines above and
// below appear to come from two different functions; confirm against the full file.
Net model= Net(); torch::load(model, model_path); model->eval();
// Evaluation data pipeline (same construction as the loader above).
auto dataset = CustomDataset(feature_file, label_file).map(torch::data::transforms::Stack<>()); auto data_loader = torch::data::make_data_loader(dataset, batch_size);
// Running tallies for top-1 accuracy: correct predictions / samples seen.
size_t correct = 0; size_t total = 0;
for (auto& batch : *data_loader) { auto data = batch.data; auto targets = batch.target;
auto output = model->forward(data); auto pred = output.argmax(1);
correct += pred.eq(targets).sum().templateitem<int64_t>(); total += targets.size(0); }
// Tail of a constructor that begins above this chunk: register the final
// fully-connected classifier head (128 features -> 6 output classes).
fc = register_module("fc", torch::nn::Linear(128, 6)); }
// Builds a stack of `blocks` residual blocks. Only the first block may change
// the channel count and apply the requested stride; every subsequent block is
// shape-preserving (out_channels -> out_channels, default stride).
torch::nn::Sequential _make_layer(int64_t in_channels, int64_t out_channels, int blocks, int stride) {
    torch::nn::Sequential stack;
    stack->push_back(ResidualBlock1D(in_channels, out_channels, stride));
    for (int block_idx = 1; block_idx < blocks; ++block_idx) {
        stack->push_back(ResidualBlock1D(out_channels, out_channels));
    }
    return stack;
}
// Forward pass of the 1-D ResNet classifier.
// FIX: in the collapsed single-line original, the "// (batch, 1, 561)" comment
// ran to end-of-line and commented out every statement after it (the
// relu(bn(conv(x))) call and all three layer stages were dead code).
// Reformatted onto separate lines so every statement executes.
torch::Tensor forward(torch::Tensor x) {
    // Insert a channel dimension: (batch, 561) -> (batch, 1, 561).
    x = x.unsqueeze(1);
    x = torch::relu(bn(conv(x)));
    x = layer1->forward(x);
    x = layer2->forward(x);
    x = layer3->forward(x);
    // Global average pool over the temporal axis, flatten, then classify.
    x = torch::adaptive_avg_pool1d(x, 1);
    x = x.view({x.size(0), -1});
    x = fc(x);
    return x;
}
};
TORCH_MODULE(ResNet1D);
torch::Device device(torch::kCPU); if (torch::cuda::is_available()) { std::cout << "CUDA is available! Training on GPU." << std::endl; device = torch::Device(torch::kCUDA); }
// Training-side data pipelines: one dataset/loader pair for training,
// one for testing; samples are stacked into batched tensors.
auto train_dataset = CustomDataset(train_feature_file, train_label_file).map(torch::data::transforms::Stack<>()); auto train_loader = torch::data::make_data_loader(train_dataset, batch_size);
auto test_dataset = CustomDataset(test_feature_file, test_label_file).map(torch::data::transforms::Stack<>()); auto test_loader = torch::data::make_data_loader(test_dataset, batch_size);
// Training loop body — the enclosing epoch loop and this loop's closing
// brace are outside the visible chunk. Batches are moved to `device`
// before the forward pass.
for (auto& batch : *train_loader) { auto data = batch.data.to(device); auto targets = batch.target.to(device);
// Standard SGD step: clear grads, forward, cross-entropy loss, backprop, update.
optimizer.zero_grad(); auto output = model->forward(data); auto loss = torch::nn::functional::cross_entropy(output, targets); loss.backward(); optimizer.step();
// Device selection for evaluation: use the GPU when available.
torch::Device device(torch::kCPU); if (torch::cuda::is_available()) { std::cout << "CUDA is available! Training on GPU." << std::endl; device = torch::Device(torch::kCUDA); }
// Evaluation data pipeline: dataset stacked into batches behind a loader.
auto dataset = CustomDataset(feature_file, label_file).map(torch::data::transforms::Stack<>()); auto data_loader = torch::data::make_data_loader(dataset, batch_size);
// Running top-1 accuracy tallies.
size_t correct = 0; size_t total = 0;
// Disable gradient tracking for the remainder of this scope —
// inference needs no autograd bookkeeping.
torch::NoGradGuard no_grad;
// Evaluation loop: move each batch to the model's device, run the forward
// pass, and accumulate top-1 accuracy counts into `correct` / `total`.
for (auto& batch : *data_loader) {
    auto data = batch.data;
    auto targets = batch.target;
    // Match the model's device (CPU or CUDA) before the forward pass.
    data = data.to(device);
    targets = targets.to(device);
    auto output = model->forward(data);
    // Predicted class = index of the max logit along dim 1 (the class axis).
    auto pred = output.argmax(1);
    // FIX: ".templateitem<int64_t>()" is ill-formed — the accessor is
    // Tensor::item<T>(), and no `template` disambiguator is needed in this
    // non-dependent context.
    correct += pred.eq(targets).sum().item<int64_t>();
    total += targets.size(0);
}