Commit 304edb43 authored by Federico Rossi's avatar Federico Rossi

:..

parent 46cc9f68
*.idx* filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.tinymodel filter=lfs diff=lfs merge=lfs -text
*.idx* filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.tinymodel filter=lfs diff=lfs merge=lfs -text
......@@ -79,8 +79,8 @@ add_executable(gtrsb_test_p12_0 test.cpp ${tiny_dnn_headers})
add_executable(gtrsb_test_p8_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(gtrsb_test_p8_0
${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(gtrsb_test_p8_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=8 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
add_dependencies(gtrsb_tests_type gtrsb_test_p8_0)
target_compile_definitions(gtrsb_test_p8_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=8 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint8_t)
add_dependencies(gtrsb_tests_type gtrsb_test_p8_0)
add_executable(gtrsb_test_p7_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(gtrsb_test_p7_0
......
......@@ -12,17 +12,17 @@ int main(int argc, char **argv) {
tiny_dnn::network<tiny_dnn::sequential> net;
std::string model_path(argv[1]);
std::string data_path(argv[2]);
int num = atoi(argv[3]);
net.load(model_path); //load pre-trained model
std::vector<tiny_dnn::label_t> vlab;
std::vector<tiny_dnn::vec_t> vimg;
std::vector<unsigned int> times;
parse_binImagenet<32,32,3,uint8_t>(data_path,&vimg,&vlab,-1,1);
parse_binImagenet<32,32,3,uint8_t>(data_path,&vimg,&vlab,-2,2);
int num = (argc < 4)? vimg.size():atoi(argv[3]);
int successes = 0;
auto start = get_time::now();
double time_mean = 0;
for(unsigned int i = 0; i < num; ++i) {
auto inner_b = get_time::now();
auto res = net.predict(vimg[i]);
......@@ -34,6 +34,7 @@ int main(int argc, char **argv) {
std::cout << "Inferencing " << i+1 << "/" << vimg.size() << "(Acc: " << (double)successes/double(i+1) << ")" << std::endl;
std::clog << "\r" << i+1 << "/" << vimg.size();
unsigned int tttt = std::chrono::duration_cast<ms>(inner_e - inner_b).count();
time_mean+= double(tttt);
times.push_back(tttt);
}
auto end = get_time::now();
......@@ -41,5 +42,6 @@ int main(int argc, char **argv) {
make_test_report(vimg,successes,times,std::chrono::duration_cast<ms>(diff).count(),"../../plot/test_perf_");
std::cout << "Accuracy: " << successes << "/" << vimg.size() << "\n";
std::cout << "Time elapsed: " << std::chrono::duration_cast<ms>(diff).count() << "ms\n";
std::cout << "Time mean: " << time_mean << std::endl;
return 0;
}
\ No newline at end of file
......@@ -32,7 +32,7 @@ static tiny_dnn::network<tiny_dnn::sequential>* construct_net() {
using fc = tiny_dnn::layers::fc;
using conv = tiny_dnn::layers::conv;
using ave_pool = tiny_dnn::layers::ave_pool;
using tanh = tiny_dnn::activation::ptanh;
using tanh = tiny_dnn::activation::relu;
using smax = tiny_dnn::activation::softmax;
using tiny_dnn::core::connection_table;
using padding = tiny_dnn::padding;
......@@ -52,7 +52,7 @@ static tiny_dnn::network<tiny_dnn::sequential>* construct_net() {
padding::valid, true, 1, 1, 1, 1)
<< tanh()
<< fc(120, 43, true) // F6, 120-in, 10-out
<< tanh();
<< smax();
return nn;
}
......@@ -64,6 +64,7 @@ int main(int argc, char **argv) {
argv[3] -> train params
*/
//srand(time(NULL));
unsigned int seed = rand();
std::cout << "Random seed:\n" << seed << std::endl;
tiny_dnn::set_random_seed(seed);
......@@ -72,14 +73,14 @@ int main(int argc, char **argv) {
std::vector<vec_t> timg,vimg;
std::string base_path(argv[1]);
for(int i = 0; i < 42;++i)
parse_binImagenet<32,32,3,uint8_t>(base_path+"/"+std::to_string(i)+".bin",&timg,&tlab,-1,1);
parse_binImagenet<32,32,3,uint8_t>(base_path+"/"+std::to_string(i)+".bin",&timg,&tlab,-2,2);
vutil::randomShuffle(timg,tlab);
std::cout << "Num train images: " << timg.size() << std::endl;
parse_binImagenet<32,32,3,uint8_t>(argv[2],&vimg,&vlab,-1,1);
parse_binImagenet<32,32,3,uint8_t>(argv[2],&vimg,&vlab,-2,2);
std::cout << "Num test images: " << vimg.size() << std::endl;
tiny_dnn::adagrad optimizer;
optimizer.alpha*=4;
optimizer.alpha*=2;
NNTrainParams tparams(argv[3],&optimizer,tlab,vlab,timg,vimg);
std::cout << tparams;
NNModel model("GTRSB-TANH",*construct_net());
......
......@@ -79,7 +79,7 @@ add_executable(mnist_test_p12_0 test_alt.cpp ${tiny_dnn_headers})
add_executable(mnist_test_p8_0 test_alt.cpp ${tiny_dnn_headers})
target_link_libraries(mnist_test_p8_0
${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(mnist_test_p8_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=8 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
target_compile_definitions(mnist_test_p8_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=8 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint8_t)
add_dependencies(mnist_tests_type mnist_test_p8_0)
add_executable(mnist_test_p7_0 test_alt.cpp ${tiny_dnn_headers})
......
......@@ -31,7 +31,7 @@ int main(int argc, char **argv) {
int successes = 0;
auto start = get_time::now();
double stime = 0;
for(unsigned int i = 0; i < test_images.size(); ++i) {
auto inner_b = get_time::now();
auto res = net.predict(test_images[i]);
......@@ -44,7 +44,9 @@ int main(int argc, char **argv) {
std::clog << "\r" << i+1 << "/" << test_images.size();
unsigned int tttt = std::chrono::duration_cast<ms>(inner_e - inner_b).count();
times.push_back(tttt);
stime+=tttt;
}
std::cout << std::endl << stime/test_images.size() << std::endl;
auto end = get_time::now();
auto diff = end - start;
make_test_report(test_images,successes,times,std::chrono::duration_cast<ms>(diff).count(),"../../plot/test_perf_");
......
......@@ -30,9 +30,10 @@ static void construct_net(tiny_dnn::network<tiny_dnn::sequential> &nn,tiny_dnn::
using fc = tiny_dnn::layers::fc;
using conv = tiny_dnn::layers::conv;
using ave_pool = tiny_dnn::layers::ave_pool;
using tanh = tiny_dnn::activation::ptanh;
using tanh = tiny_dnn::activation::elu;
using tiny_dnn::core::connection_table;
using padding = tiny_dnn::padding;
using smax = tiny_dnn::activation::softmax;
nn << conv(32, 32, 5, 1, 6, // C1, 1@32x32-in, 6@28x28-out
padding::valid, true, 1, 1, 1, 1, backend_type)
......@@ -100,7 +101,7 @@ static void train_lenet(const std::string &data_dir_path,
static_cast<tiny_dnn::float_t>(sqrt(n_minibatch) * learning_rate));
int epoch = 1;
float_t total_elapsed = float_t{0.0};
tiny_dnn::float_t total_elapsed = tiny_dnn::float_t{0.0};
std::vector<unsigned int> test_succ;
std::vector<unsigned int> train_succ;
// create callback
......
find_package(Qt5 COMPONENTS Widgets Charts REQUIRED)
find_package(Qt5Charts)
#find_package(Qt5 COMPONENTS Widgets Charts)
#find_package(Qt5Charts)
add_executable(qttest test.cpp ${tiny_dnn_headers})
target_link_libraries(qttest Qt5::Widgets Qt5::Charts ${project_library_target_name} ${REQUIRED_LIBRARIES})
#add_executable(qttest test.cpp ${tiny_dnn_headers})
#target_link_libraries(qttest Qt5::Widgets Qt5::Charts ${project_library_target_name} ${REQUIRED_LIBRARIES})
users = [
{
uid = 123
first_name = "John"
last_name = "Doe"
permissions = [ "READ", "CREATE", "UPDATE" ]
join_date = [ 5, 5, 2013 ]
}
{
uid = 331
first_name = "Test"
last_name = "Person"
permissions = [ "UPDATE", "CREATE" ]
join_date = [ 2, 4, 2012 ]
}
]
users = [
{
uid = 123
first_name = "John"
last_name = "Doe"
permissions = [ "READ", "CREATE", "UPDATE" ]
join_date = [ 5, 5, 2013 ]
}
{
uid = 331
first_name = "Test"
last_name = "Person"
permissions = [ "UPDATE", "CREATE" ]
join_date = [ 2, 4, 2012 ]
}
]
global = { maintainer = "user" }
\ No newline at end of file
......@@ -39,8 +39,8 @@ class elu_layer : public activation_layer {
// Forward pass of the ELU activation layer.
// Applies ELU element-wise: y[j] = x[j] for x[j] >= 0,
// alpha_ * (exp(x[j]) - 1) otherwise — delegated to tiny_dnn::elu,
// which also selects the posit fast path (fastELU) when available.
// @param x input activations (read-only)
// @param y output buffer, same size as x (written in place)
void forward_activation(const vec_t &x, vec_t &y) override {
  for (size_t j = 0; j < x.size(); j++) {
    // NOTE(review): previous code computed the ELU ternary inline and then
    // immediately overwrote it via the helper; keep only the helper call.
    y[j] = tiny_dnn::elu(x[j], alpha_);
  }
}
......
#pragma once
#include "tiny_dnn/tiny_dnn.h"
#include <unistd.h>
#include <cstdlib>
#include <signal.h>
#include <functional>
using namespace tiny_dnn;
#ifdef QT_BOARD
using namespace tinyboard;
#endif
std::function<void(int)> callback_wrapper;
void callback_function(int value) {
callback_wrapper(value);
}
class NNTrainSession
{
private:
......@@ -17,7 +29,20 @@ private:
#ifdef QT_BOARD
tinyboard::TopicHandler* _accuracy_publisher;
#endif
bool _interrupted = false;
void _catchInt(int sigNum) {
if(_interrupted == true) {
std::cout << "Forcing exit" << std::endl;
exit(sigNum);
}
std::cout << "Interrupted, training will be stopped at the end of the epoch\n and the model will be saved\n"
<< "Press CTRL+C again to force close the training" << std::endl;
_interrupted = true;
}
public:
NNTrainSession(network<tiny_dnn::sequential>* model,NNTrainParams* params) {
this->_params = params;
......@@ -27,6 +52,12 @@ public:
_accuracy_publisher = new tinyboard::TopicHandler(tinyboard::topics[0]);
_accuracy_publisher->make();
#endif
callback_wrapper = std::bind(&NNTrainSession::_catchInt,this,std::placeholders::_1);
struct sigaction sigIntHandler;
sigIntHandler.sa_handler = callback_function;
sigemptyset(&sigIntHandler.sa_mask);
sigIntHandler.sa_flags = 0;
sigaction(SIGINT, &sigIntHandler, NULL);
}
......@@ -44,6 +75,10 @@ public:
_accuracy_publisher->writeObj(accuracy);
#endif
std::cout << "\nEpoch " << _eepochs << " completed in " << epochTime << " s\nSession time: " << _elapsed << " s\nValidation accuracy: " << accuracy << std::endl;
if(_interrupted) {
_model->save("dump_epoch_"+std::to_string(_eepochs-1)+".tinymodel");
_interrupted = false;
}
_display->restart(_params->timages.size());
_timer.restart();
}
......
......@@ -118,6 +118,18 @@ namespace tiny_dnn {
return float_t(2)*sigmoid(float_t(2)*x)-float_t(1);
#endif
}
// ELU activation: returns x for x >= 0, and a * (exp(x) - 1) for x < 0.
// @param x input value
// @param a alpha scaling factor for the negative branch (default 1,
//          which preserves the behavior of existing default-argument calls)
// Under CNN_USE_POSIT with CNN_EXP_BITS == 0 the posit type's fastELU()
// shortcut is used; note that path does not apply `a` — assumed alpha == 1
// there (TODO confirm fastELU semantics for alpha != 1).
float_t elu(float_t x, float_t a = 1) {
#ifdef CNN_USE_POSIT
#if(CNN_EXP_BITS == 0)
  return x.fastELU();
#else
  // Bug fix: the negative branch previously dropped the alpha factor,
  // returning exp(x)-1 instead of a*(exp(x)-1) (cf. elu_layer's formula).
  return (x >= float_t(0)) ? x : a * (tiny_dnn::exp(x) - float_t(1));
#endif
#else
  return (x >= float_t(0)) ? x : a * (tiny_dnn::exp(x) - float_t(1));
#endif
}
///< output label(class-index) for classification
///< must be equal to size_t, because size of last layer is equal to num.
/// of classes
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment