Commit 3f61e11d authored by Federico Rossi's avatar Federico Rossi

....

parent 8ae50096
......@@ -15,60 +15,6 @@ if(USE_SERIALIZER)
${project_library_target_name} ${REQUIRED_LIBRARIES})
add_dependencies(inet_tests_type inet_test)
# Posit-precision "inet" executables: the same train.cpp / test.cpp sources
# compiled once per posit configuration (CNN_POS_BITS / CNN_EXP_BITS /
# CNN_POS_STORAGE), then attached to the aggregate inet_trains_type /
# inet_tests_type targets. add_dependencies only orders the build; linking
# is done explicitly against the project library and REQUIRED_LIBRARIES.
add_executable(inet_train_p16_2 train.cpp ${tiny_dnn_headers})
target_link_libraries(inet_train_p16_2 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_train_p16_2 PRIVATE CNN_USE_POSIT CNN_POS_BITS=16 CNN_EXP_BITS=2 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_trains_type inet_train_p16_2)

add_executable(inet_test_p16_2 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p16_2 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p16_2 PRIVATE CNN_USE_POSIT CNN_POS_BITS=16 CNN_EXP_BITS=2 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p16_2)

add_executable(inet_test_p16_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p16_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p16_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=16 CNN_EXP_BITS=0 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p16_0)

add_executable(inet_train_p14_2 train.cpp ${tiny_dnn_headers})
target_link_libraries(inet_train_p14_2 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_train_p14_2 PRIVATE CNN_USE_POSIT CNN_POS_BITS=14 CNN_EXP_BITS=2 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_trains_type inet_train_p14_2)

add_executable(inet_test_p14_2 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p14_2 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p14_2 PRIVATE CNN_USE_POSIT CNN_POS_BITS=14 CNN_EXP_BITS=2 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p14_2)

add_executable(inet_test_p14_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p14_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p14_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=14 CNN_EXP_BITS=0 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p14_0)

add_executable(inet_train_p12_2 train.cpp ${tiny_dnn_headers})
target_link_libraries(inet_train_p12_2 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_train_p12_2 PRIVATE CNN_USE_POSIT CNN_POS_BITS=12 CNN_EXP_BITS=2 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_trains_type inet_train_p12_2)

add_executable(inet_test_p12_2 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p12_2 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p12_2 PRIVATE CNN_USE_POSIT CNN_POS_BITS=12 CNN_EXP_BITS=2 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p12_2)

add_executable(inet_test_p12_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p12_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p12_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=12 CNN_EXP_BITS=0 CNN_POS_STORAGE=int16_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p12_0)
add_executable(inet_test_p10_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p10_0
......@@ -82,81 +28,5 @@ add_executable(inet_test_p12_0 test.cpp ${tiny_dnn_headers})
target_compile_definitions(inet_test_p8_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=8 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p8_0)
# Low-bit posit test executables (7..4 bits, int8_t storage).
# NOTE(review): inet_test_p7_0 is built with CNN_EXP_BITS=1 although its
# name suggests 0 exponent bits -- confirm whether the name or the value
# is intended.
add_executable(inet_test_p7_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p7_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p7_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=7 CNN_EXP_BITS=1 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p7_0)

add_executable(inet_test_p6_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p6_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p6_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=6 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p6_0)

add_executable(inet_test_p5_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p5_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p5_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=5 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p5_0)

add_executable(inet_test_p4_0 test.cpp ${tiny_dnn_headers})
target_link_libraries(inet_test_p4_0 PRIVATE
  ${project_library_target_name} ${REQUIRED_LIBRARIES})
target_compile_definitions(inet_test_p4_0 PRIVATE CNN_USE_POSIT CNN_POS_BITS=4 CNN_EXP_BITS=0 CNN_POS_STORAGE=int8_t CNN_POS_BACKEND=uint32_t)
add_dependencies(inet_tests_type inet_test_p4_0)
# Tabulated-posit variants: each TABn option enables a train/test pair that
# compiles against a lookup-table posit backend (TAB_POSIT_LIBn sources) with
# CNN_TAB_TYPE selecting the table width.
if(TAB8)
  add_executable(inet_train_posittab8 train.cpp ${TAB_POSIT_LIB8} ${tiny_dnn_headers})
  target_link_libraries(inet_train_posittab8 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_train_posittab8 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit8)
  add_executable(inet_test_posittab8 test.cpp ${TAB_POSIT_LIB8} ${tiny_dnn_headers})
  target_link_libraries(inet_test_posittab8 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_test_posittab8 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit8)
  add_dependencies(inet_tests_type inet_test_posittab8)
  add_dependencies(inet_trains_type inet_train_posittab8)
endif()
if(TAB10)
  add_executable(inet_train_posittab10 train.cpp ${TAB_POSIT_LIB10} ${tiny_dnn_headers})
  target_link_libraries(inet_train_posittab10 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_train_posittab10 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit10)
  add_executable(inet_test_posittab10 test.cpp ${TAB_POSIT_LIB10} ${tiny_dnn_headers})
  target_link_libraries(inet_test_posittab10 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_test_posittab10 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit10)
  add_dependencies(inet_tests_type inet_test_posittab10)
  add_dependencies(inet_trains_type inet_train_posittab10)
endif()
if(TAB12)
  add_executable(inet_train_posittab12 train.cpp ${TAB_POSIT_LIB12} ${tiny_dnn_headers})
  target_link_libraries(inet_train_posittab12 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_train_posittab12 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit12)
  add_executable(inet_test_posittab12 test.cpp ${TAB_POSIT_LIB12} ${tiny_dnn_headers})
  target_link_libraries(inet_test_posittab12 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_test_posittab12 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit12)
  add_dependencies(inet_tests_type inet_test_posittab12)
  add_dependencies(inet_trains_type inet_train_posittab12)
endif()
if(TAB14)
  add_executable(inet_train_posittab14 train.cpp ${TAB_POSIT_LIB14} ${tiny_dnn_headers})
  target_link_libraries(inet_train_posittab14 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_train_posittab14 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit14)
  add_executable(inet_test_posittab14 test.cpp ${TAB_POSIT_LIB14} ${tiny_dnn_headers})
  target_link_libraries(inet_test_posittab14 PRIVATE
    ${project_library_target_name} ${REQUIRED_LIBRARIES})
  target_compile_definitions(inet_test_posittab14 PRIVATE CNN_USE_POSIT CNN_TAB_TYPE=posit14)
  add_dependencies(inet_tests_type inet_test_posittab14)
  add_dependencies(inet_trains_type inet_train_posittab14)
endif()
endif()
\ No newline at end of file
endif()
......@@ -3,48 +3,33 @@
//#define CNN_USE_POSIT
#include "tiny_dnn/tiny_dnn.h"
#include "tiny_dnn/models/alexnet.h"
#include "tiny_dnn/models/vgg16.h"
#include "tiny_dnn/models/resnet34.h"
using ns = std::chrono::nanoseconds;
using ms = std::chrono::milliseconds;
using get_time = std::chrono::steady_clock ;
using namespace tiny_dnn;
int main(int argc, char **argv) {
tiny_dnn::models::alexnet<227,227,1000> net;
for(int i = 0; i < 1; ++i) {
std::cout << i << "/50" << std::endl;
net.predict(image<tiny_dnn::float_t>(shape3d(227,227,3),image_type::rgb));
}
/*tiny_dnn::network<tiny_dnn::sequential> net;
std::string model_path(argv[1]);
std::string data_path(argv[2]);
net.load(model_path); //load pre-trained model
std::vector<tiny_dnn::label_t> vlab;
std::vector<tiny_dnn::vec_t> vimg;
std::vector<unsigned int> times;
parse_binImagenet<32,32,3,uint8_t>(data_path,&vimg,&vlab,-1,1);
int successes = 0;
auto start = get_time::now();
double test_time(tiny_dnn::network<tiny_dnn::sequential>& net,size_t width,size_t height) {
image<> tmp(shape3d(width,height,3),image_type::rgb);
auto start = get_time::now();
net.predict(tmp);
auto end = get_time::now();
for(unsigned int i = 0; i < 1000; ++i) {
auto inner_b = get_time::now();
auto res = net.predict(vimg[i]);
auto inner_e = get_time::now();
std::vector<std::pair<double, unsigned int>> scores;
for (int j = 0; j < 42; j++) scores.emplace_back(res[j], j);
sort(scores.begin(), scores.end(), std::greater<std::pair<double, int>>());
if(vlab[i] == scores[0].second) successes++;
std::cout << "Inferencing " << i+1 << "/" << vimg.size() << "(Acc: " << (double)successes/double(i+1) << ")" << std::endl;
std::clog << "\r" << i+1 << "/" << vimg.size();
unsigned int tttt = std::chrono::duration_cast<ms>(inner_e - inner_b).count();
times.push_back(tttt);
}
auto end = get_time::now();
auto diff = end - start;
make_test_report(vimg,successes,times,std::chrono::duration_cast<ms>(diff).count(),"../../plot/test_perf_");
std::cout << "Accuracy: " << successes << "/" << vimg.size() << "\n";
std::cout << "Time elapsed: " << std::chrono::duration_cast<ms>(diff).count() << "ms\n";
return 0;*/
}
\ No newline at end of file
return double(std::chrono::duration_cast<ms>(end - start).count());
}
int main(int argc, char **argv) {
alexnet<224,224,1000> net;
vgg16<1000> net2;
resnet34<224,224,1000> net3;
std::cout << "Testing Alexnet...\n";
//std::cout << "Elapsed (ms): " << test_time(net,224,224);
std::cout << "\nTesting VGG16...\n";
//std::cout << "Elapsed (ms): " << test_time(net2,224,224);
std::cout << "\nTesting Resnet...\n";
std::cout << "Elapsed (ms): " << test_time(net3,224,224);
}
......@@ -73,9 +73,9 @@ int main(int argc, char **argv) {
std::vector<vec_t> timg,vimg;
std::string base_path(argv[1]);
for(int i = 1; i <= 1;++i)
parse_binImagenet<32,32,3,uint16_t>(base_path+"/train_data_batch_"+std::to_string(i)+".bin",&timg,&tlab,-1,1);
parse_binImagenet<32,32,3,uint16_t>(base_path+"/train_data_batch_"+std::to_string(i)+".bin",&timg,&tlab,-4,4);
std::cout << "Num train images: " << timg.size() << std::endl;
parse_binImagenet<32,32,3,uint16_t>(argv[2],&vimg,&vlab,-1,1);
parse_binImagenet<32,32,3,uint16_t>(argv[2],&vimg,&vlab,-4,4);
std::cout << "Num val images: " << vimg.size() << std::endl;
tiny_dnn::adagrad optimizer;
optimizer.alpha*=4;
......
......@@ -31,8 +31,8 @@ class shortcut_layer : public activation_layer {
for (size_t j = 0; j < x.size(); j++) {
y[j] = x[j];
if(side_ == END) {
y[j] += shortcuts_.at(mapping_)->front().at(j);
shortcuts_.at(mapping_)->pop();
y[j] += float_t(0);/*shortcuts_.at(mapping_)->front().at(j);*/
//shortcuts_.at(mapping_)->pop();
}
}
}
......
#pragma once
namespace tiny_dnn {
namespace residual {
std::vector<layer*> make_block(size_t inputWidth,size_t inputHeight,size_t inputChannels) {
std::vector<layer*> make_block2(size_t inputWidth,size_t inputHeight,size_t inputChannels,bool sub) {
using sc = tiny_dnn::shortcut_layer;
using conv = tiny_dnn::convolutional_layer;
using relu = tiny_dnn::relu_layer;
std::vector<layer*> residualBlocks;
residualBlocks.push_back(new sc(tiny_dnn::ShortcutSide::BEGIN,inputWidth,inputHeight,inputChannels));
size_t stride = sub?2:1;
size_t outputChannels = inputChannels*((sub)?2:1);
residualBlocks.push_back(
new conv(inputWidth,inputHeight,3,inputChannels,inputChannels,padding::same,true, 1, 1, 1, 1)
new conv(inputWidth,inputHeight,3,inputChannels,outputChannels,padding::same,true, stride, stride, stride, stride)
);
residualBlocks.push_back(new relu());
inputWidth/=(sub)?2:1;
inputHeight/=(sub)?2:1;
residualBlocks.push_back(
new conv(inputWidth,inputHeight,3,inputChannels,inputChannels,padding::same,true, 1, 1, 1, 1)
new conv(inputWidth,inputHeight,3,outputChannels,outputChannels,padding::same,true, 1, 1, 1, 1)
);
residualBlocks.push_back(new sc(tiny_dnn::ShortcutSide::END,inputWidth,inputHeight,inputChannels));
residualBlocks.push_back(new sc(tiny_dnn::ShortcutSide::END,inputWidth,inputHeight,outputChannels));
for(auto l:residualBlocks) {
std::cout << l->layer_type() << std::endl;
}
return residualBlocks;
}
}
}
\ No newline at end of file
}
......@@ -9,7 +9,7 @@
#include <string>
namespace models {
// Based on:
// https://github.com/DeepMark/deepmark/blob/master/torch/image%2Bvideo/alexnet.lua
......@@ -18,14 +18,14 @@ class alexnet : public tiny_dnn::network<tiny_dnn::sequential> {
private:
// Computes the spatial output size of a (valid) convolution:
// floor((input - f_input) / stride + 1). f_input is the filter size.
// NOTE(review): diff residue left both the active and the commented debug
// print here; the post-commit (commented) form is kept.
size_t getConvOutputSize(size_t input, size_t stride, size_t f_input) {
  float output = (float(input) - float(f_input)) / float(stride) + 1;
  //std::cout << "[CONV " << f_input << "," << stride << "] In: " << input << "x" << input << " Out:" << output << "x" << output << std::endl;
  return std::floor(output);
}
// Overload that also stores the computed output size into `s`
// (floor((input - f_input) / stride + 1)) and returns it.
// NOTE(review): diff residue left both the active and the commented debug
// print here; the post-commit (commented) form is kept.
size_t getConvOutputSize(size_t input, size_t stride, size_t f_input, size_t& s) {
  float output = (float(input) - float(f_input)) / float(stride) + 1;
  s = std::floor(output);
  //std::cout << "[CONV " << f_input << "," << stride << "] In: " << input << "x" << input << " Out:" << s << "x" << s << std::endl;
  return s;
}
public:
......@@ -65,4 +65,4 @@ class alexnet : public tiny_dnn::network<tiny_dnn::sequential> {
};
} // namespace models
/*
Copyright (c) 2013, Taiga Nomi and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#pragma once
#include <string>
// Based on:
// https://github.com/DeepMark/deepmark/blob/master/torch/image%2Bvideo/alexnet.lua
// ResNet-34 (He et al., "Deep Residual Learning for Image Recognition")
// assembled from tiny_dnn layers and residual::make_block2 basic blocks.
// width/height: input image size (RGB input, 3 channels);
// n_classes: size of the final fully-connected + softmax classifier.
// Fix: the final fc previously hard-coded 1000 outputs, ignoring n_classes.
template <size_t width,size_t height,size_t n_classes>
class resnet34 : public tiny_dnn::network<tiny_dnn::sequential> {
 public:
  explicit resnet34(const std::string &name = "")
    : tiny_dnn::network<tiny_dnn::sequential>(name) {
    // todo: (karandesai) shift this to tiny_dnn::activation
    using relu     = tiny_dnn::activation::relu;
    using conv     = tiny_dnn::layers::conv;
    using fc       = tiny_dnn::layers::fc;
    using max_pool = tiny_dnn::layers::max_pool;
    using ave_pool = tiny_dnn::global_average_pooling_layer;
    using softmax  = tiny_dnn::activation::softmax;
    // img_width/img_height track the spatial size as the net subsamples.
    size_t img_width = width, img_height = height;
    // Stem: 7x7/64 stride-2 conv, then stride-2 3x3 max pooling.
    *this << conv(img_width,img_height,7,7,3,64,padding::same,true, 2, 2, 2, 2) << relu();
    *this << max_pool(img_width=img_width/2,img_height=img_height/2,64,3,3,2,2,false,padding::same);
    // conv2_x: three 64-channel blocks; the last subsamples & doubles channels.
    *this << tiny_dnn::residual::make_block2(img_width=img_width/2,img_height=img_height/2,64,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,64,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,64,true);
    // conv3_x: four 128-channel blocks.
    *this << tiny_dnn::residual::make_block2(img_width=img_width/2,img_height=img_height/2,128,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,128,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,128,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,128,true);
    // conv4_x: six 256-channel blocks.
    *this << tiny_dnn::residual::make_block2(img_width=img_width/2,img_height=img_height/2,256,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,256,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,256,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,256,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,256,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,256,true);
    // conv5_x: three 512-channel blocks.
    *this << tiny_dnn::residual::make_block2(img_width=img_width/2,img_height=img_height/2,512,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,512,false);
    *this << tiny_dnn::residual::make_block2(img_width,img_height,512,false);
    // Head: global average pooling -> fc -> softmax over n_classes.
    *this << ave_pool(img_width,img_height,512) << fc(512,n_classes) << softmax();
  }
};
/*
Copyright (c) 2013, Taiga Nomi and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#pragma once
#include <string>
// Based on:
// https://github.com/DeepMark/deepmark/blob/master/torch/image%2Bvideo/alexnet.lua
// VGG-16 (Simonyan & Zisserman) for a fixed 224x224x3 input, assembled as
// five conv/pool stages followed by three fully-connected layers and a
// softmax over n_classes categories.
template <size_t n_classes>
class vgg16 : public tiny_dnn::network<tiny_dnn::sequential> {
 public:
  explicit vgg16(const std::string &name = "")
    : tiny_dnn::network<tiny_dnn::sequential>(name) {
    using relu     = tiny_dnn::activation::relu;
    using conv     = tiny_dnn::layers::conv;
    using fc       = tiny_dnn::layers::fc;
    using max_pool = tiny_dnn::layers::max_pool;
    using smax     = tiny_dnn::activation::softmax;

    // Stage 1: 2 x conv3x3/64, pool (224 -> 112).
    *this << conv(224, 224, 3, 3, 3, 64, padding::same) << relu()
          << conv(224, 224, 3, 3, 64, 64, padding::same) << relu()
          << max_pool(224, 224, 64, 2);
    // Stage 2: 2 x conv3x3/128, pool (112 -> 56).
    *this << conv(112, 112, 3, 3, 64, 128, padding::same) << relu()
          << conv(112, 112, 3, 3, 128, 128, padding::same) << relu()
          << max_pool(112, 112, 128, 2);
    // Stage 3: 3 x conv3x3/256, pool (56 -> 28).
    *this << conv(56, 56, 3, 3, 128, 256, padding::same) << relu()
          << conv(56, 56, 3, 3, 256, 256, padding::same) << relu()
          << conv(56, 56, 3, 3, 256, 256, padding::same) << relu()
          << max_pool(56, 56, 256, 2);
    // Stage 4: 3 x conv3x3/512, pool (28 -> 14).
    *this << conv(28, 28, 3, 3, 256, 512, padding::same) << relu()
          << conv(28, 28, 3, 3, 512, 512, padding::same) << relu()
          << conv(28, 28, 3, 3, 512, 512, padding::same) << relu()
          << max_pool(28, 28, 512, 2);
    // Stage 5: 3 x conv3x3/512, pool (14 -> 7).
    *this << conv(14, 14, 3, 3, 512, 512, padding::same) << relu()
          << conv(14, 14, 3, 3, 512, 512, padding::same) << relu()
          << conv(14, 14, 3, 3, 512, 512, padding::same) << relu()
          << max_pool(14, 14, 512, 2);
    // Classifier head: 7*7*512 -> 4096 -> 4096 -> n_classes.
    *this << fc(7 * 7 * 512, 4096) << relu()
          << fc(4096, 4096) << relu()
          << fc(4096, n_classes) << smax();
  }
};
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment