# Pastebin SVBFKAwz

```cpp
// NOTE: the original paste lost its #include paths and every template
// argument (most likely to HTML escaping of "<...>").  The headers below,
// the CLI::GetParam<...> types, the FFN loss type, and the layer types in
// the Add<...>() calls are a best-effort reconstruction from the mlpack ANN
// API of that era and may differ from the original file.  BiLinearFunction,
// Resize<>, and AdamBatchSGD come from the experimental GAN branch this code
// was written against and are not part of mainline mlpack; mainline
// BatchNorm<> also expects the layer size in its constructor, whereas the
// no-argument calls below are kept exactly as they appeared in the paste.
#include <mlpack/core.hpp>
#include <mlpack/methods/ann/ffn.hpp>
#include <mlpack/methods/ann/layer/layer.hpp>
#include <mlpack/methods/ann/init_rules/gaussian_init.hpp>
#include <mlpack/methods/ann/gan/gan.hpp>
#include <mlpack/core/optimizers/adam/adam.hpp>

using namespace mlpack;
using namespace mlpack::ann;
using namespace mlpack::math;
using namespace mlpack::optimization;
using namespace mlpack::regression;

PROGRAM_INFO("GAN", "Convolution");

PARAM_STRING_IN("dataset", "path/to/dataset.", "i", "mnist.arm");
PARAM_STRING_IN("output-dataset", "path/to/output.", "o", "output.txt");
PARAM_INT_IN("dataset-max-cols", "dataset max cols.", "m", -1);
PARAM_INT_IN("batch-size", "batch-size.", "b", 100);
PARAM_INT_IN("generator-update-step", "generator-update-step.", "g", 100);
PARAM_INT_IN("discriminatorPreTrain", "discriminatorPreTrain", "x", 0);
PARAM_INT_IN("dNumKernels", "dNumKernels", "k", 32);
PARAM_INT_IN("noise-dim", "noise dim.", "N", 25);
PARAM_INT_IN("num-samples", "num samples.", "n", 1000);
PARAM_DOUBLE_IN("step-size", "learning rate.", "r", 0.01);
PARAM_INT_IN("num-epoches", "num-epoches.", "e", 20);
PARAM_DOUBLE_IN("tolerance", "tolerance.", "t", 1e-5);
PARAM_FLAG("shuffle", "shuffle or not", "s");

int main(int argc, char* argv[])
{
  /*
  #ifdef LOCAL
    freopen("output.txt", "w", stdout);
  #endif
  */
  // We treat the architecture as constant.
  CLI::ParseCommandLine(argc, argv);

  size_t dNumKernels = 32;
  size_t discriminatorPreTrain = 300;
  size_t batchSize = 50;
  size_t noiseDim = 10;
  size_t generatorUpdateStep = 1;
  size_t numSamples = 10;
  double stepSize = 0.0003;
  double eps = 1e-8;  // (unused)
  size_t numEpoches = 2000;
  double tolerance = 1e-5;
  int datasetMaxCols = -1;
  bool shuffle = true;

  std::string dataset = CLI::GetParam<std::string>("dataset");
  std::string output_dataset = CLI::GetParam<std::string>("output-dataset");

  if (CLI::HasParam("dataset-max-cols"))
    datasetMaxCols = CLI::GetParam<int>("dataset-max-cols");
  if (CLI::HasParam("batch-size"))
    batchSize = CLI::GetParam<int>("batch-size");
  if (CLI::HasParam("generator-update-step"))
    generatorUpdateStep = CLI::GetParam<int>("generator-update-step");
  if (CLI::HasParam("noise-dim"))
    noiseDim = CLI::GetParam<int>("noise-dim");
  if (CLI::HasParam("num-samples"))
    numSamples = CLI::GetParam<int>("num-samples");
  if (CLI::HasParam("step-size"))
    stepSize = CLI::GetParam<double>("step-size");
  if (CLI::HasParam("num-epoches"))
    numEpoches = CLI::GetParam<int>("num-epoches");
  if (CLI::HasParam("tolerance"))
    tolerance = CLI::GetParam<double>("tolerance");
  if (CLI::HasParam("discriminatorPreTrain"))
    discriminatorPreTrain = CLI::GetParam<int>("discriminatorPreTrain");
  shuffle = CLI::HasParam("shuffle");

  Log::Info << "dataset = '" << dataset << "'" << std::endl;
  Log::Info << "output_dataset = '" << output_dataset << "'" << std::endl;
  Log::Info << std::boolalpha
      << " batchSize = " << batchSize
      << " generatorUpdateStep = " << generatorUpdateStep
      << " noiseDim = " << noiseDim
      << " numSamples = " << numSamples
      << " stepSize = " << stepSize
      << " numEpoches = " << numEpoches
      << " tolerance = " << tolerance
      << " shuffle = " << shuffle << std::endl;

  arma::mat trainData;
  trainData.load(dataset);
  if (datasetMaxCols > 0)
    trainData = trainData.cols(0, datasetMaxCols - 1);

  Log::Info << "Dataset loaded (" << trainData.n_rows << ", "
      << trainData.n_cols << ")" << std::endl;

  size_t numIterations = trainData.n_cols * numEpoches;
  numIterations /= batchSize;

  Log::Info << trainData.n_rows << "--------" << trainData.n_cols << std::endl;

  // Discriminator network.
  FFN<SigmoidCrossEntropyError<>> discriminator;
  discriminator.Add<Convolution<>>(1, dNumKernels, 5, 5, 1, 1, 2, 2, 28, 28);
  discriminator.Add<BatchNorm<>>();
  discriminator.Add<LeakyReLU<>>();
  discriminator.Add<MeanPooling<>>(2, 2, 2, 2);
  discriminator.Add<Convolution<>>(dNumKernels, 2 * dNumKernels, 5, 5, 1, 1,
      2, 2, 14, 14);
  discriminator.Add<BatchNorm<>>();
  discriminator.Add<LeakyReLU<>>();
  discriminator.Add<MeanPooling<>>(2, 2, 2, 2);
  discriminator.Add<Linear<>>(7 * 7 * 2 * dNumKernels, 1024);
  discriminator.Add<BatchNorm<>>();
  discriminator.Add<LeakyReLU<>>();
  discriminator.Add<Linear<>>(1024, 1);
  discriminator.Add<SigmoidLayer<>>();

  // Bilinear upsampling functions used by the generator's resize layers.
  BiLinearFunction interpolation1(28, 28, 56, 56, noiseDim / 2);
  BiLinearFunction interpolation2(28, 28, 56, 56, noiseDim / 4);

  // Generator network.
  FFN<SigmoidCrossEntropyError<>> generator;
  generator.Add<Linear<>>(noiseDim, 3136);
  generator.Add<BatchNorm<>>();
  generator.Add<ReLULayer<>>();
  generator.Add<Convolution<>>(1, noiseDim / 2, 3, 3, 2, 2, 1, 1, 56, 56);
  generator.Add<BatchNorm<>>();
  generator.Add<ReLULayer<>>();
  generator.Add<Resize<>>(interpolation1);
  generator.Add<Convolution<>>(noiseDim / 2, noiseDim / 4, 3, 3, 2, 2, 1, 1,
      56, 56);
  generator.Add<BatchNorm<>>();
  generator.Add<ReLULayer<>>();
  generator.Add<Resize<>>(interpolation2);
  generator.Add<Convolution<>>(noiseDim / 4, 1, 1, 1, 2, 2, 0, 0, 56, 56);
  generator.Add<BatchNorm<>>();
  generator.Add<ReLULayer<>>();
  generator.Add<SigmoidLayer<>>();

  // Initialisation function.
  GaussianInitialization gaussian(0, 1);

  // Optimizer.
  AdamBatchSGD optimizer(batchSize, stepSize, numIterations, tolerance,
      shuffle);

  std::function<double()> noiseFunction =
      [] () { return math::RandNormal(0, 1); };

  // Train the GAN; the discriminator is pre-trained for
  // `discriminatorPreTrain` steps before adversarial updates begin.
  GAN<FFN<SigmoidCrossEntropyError<>>, GaussianInitialization,
      std::function<double()>> gan(trainData, generator, discriminator,
      gaussian, noiseFunction, noiseDim, batchSize, generatorUpdateStep,
      discriminatorPreTrain);

  Log::Info << "Training..." << std::endl;
  // std::cout << "Train" << std::endl;
  gan.Train(optimizer);

  // Generate samples.
  Log::Info << "Sampling..." << std::endl;
  arma::mat noise(noiseDim, 1);
  size_t dim = std::sqrt(trainData.n_rows);
  arma::mat generatedData(2 * dim, dim * numSamples);

  for (size_t i = 0; i < numSamples; i++)
  {
    arma::mat samples;
    noise.imbue( [&]() { return noiseFunction(); } );
    generator.Forward(noise, samples);
    samples.reshape(dim, dim);
    samples = samples.t();

    // Top half of the output: a generated digit.
    generatedData.submat(0, i * dim, dim - 1, i * dim + dim - 1) = samples;

    // Bottom half: a randomly chosen real digit, for side-by-side comparison.
    samples = trainData.col(math::RandInt(0, trainData.n_cols));
    samples.reshape(dim, dim);
    samples = samples.t();

    generatedData.submat(dim, i * dim, 2 * dim - 1, i * dim + dim - 1) =
        samples;
  }

  Log::Info << "Saving output to " << output_dataset << "..." << std::endl;
  generatedData.save(output_dataset, arma::raw_ascii);
  Log::Info << "Output saved!" << std::endl;
}
```
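The paste does not show how `mnist.arm` was produced. As a rough sketch only: the program expects an Armadillo matrix in which each column is one flattened 28x28 digit (it takes `sqrt(trainData.n_rows)` as the image side and pulls real samples with `trainData.col(...)`). The CSV filename and the [0, 1] scaling below are assumptions, not something stated in the paste.

```cpp
// Hypothetical helper (not part of the paste): convert a CSV of flattened
// 28x28 MNIST digits into the Armadillo matrix the GAN program loads.
#include <mlpack/core.hpp>

int main()
{
  arma::mat images;
  // mlpack transposes CSVs on load, so each column becomes one image
  // (784 rows per column for 28x28 digits).
  mlpack::data::Load("mnist_images.csv", images, true /* fatal on error */);
  images /= 255.0;           // Assumed scaling of pixel values into [0, 1].
  images.save("mnist.arm");  // Armadillo's default arma_binary format.
  return 0;
}
```

Assuming the compiled binary is named `gan`, an invocation consistent with the PARAM definitions above would look like `gan -i mnist.arm -o output.txt -b 100 -e 20 -N 25 -s` (the short flag names are taken from the paste itself).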