  1. #1

    Advice on implementing my ANN class

    Hi guys,

    I'd like some advice on designing and implementing my neural network class. It has three input neurons, four hidden neurons (one of which is a bias neuron whose connections from the input layer all have weight 1.0), and one output neuron.

    Its goal is to determine where a point (x, y, z) lies in relation to a given 3D function.
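
    Concretely, the label I train against is just the side of the surface that a sampled point falls on. For the plane case it boils down to something like this (only a rough sketch of the rule my Trainer applies below, not part of the class itself):
    Code:
    // Sketch only: -1 if the point lies below the surface f(x, y) = a*x + b*y + c, +1 otherwise.
    int Label(float x, float y, float z, float a, float b, float c)
    {
    	return (z < a * x + b * y + c) ? -1 : 1;
    }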

    Here's the code I have so far:

    Typedefs.hpp
    Code:
    #ifndef TYPEDEFS_HPP
    #define TYPEDEFS_HPP
    #include <vector>
    #include <random>
    
    typedef std::vector<float> FloatV;
    typedef std::random_device RNG;
    typedef std::uniform_int_distribution<> UID;
    
    #endif
    BadInput.hpp
    Code:
    #ifndef BADINPUT_HPP
    #define BADINPUT_HPP
    
    class BadInput
    {
      public:
    	BadInput(int val):difference(val)
    	{
    	}
    	int difference;
    };
    
    #endif
    Functor.hpp
    Code:
    #ifndef FUNCTOR_HPP
    #define FUNCTOR_HPP
    #include "Typedefs.hpp"
    
    class Functor
    {
      public:
    	Functor() = default;
    	Functor(const Functor& rhs) = default;
    	virtual ~Functor() = default;
    	virtual float operator()(const FloatV& input) const = 0;
    	virtual int GetArgNum() const = 0;
    };
    
    class Plane: public Functor
    {
      public:
    	Plane() = delete;
    	Plane(const FloatV& values);
    	Plane(const Plane& rhs) = default;
    	virtual ~Plane() = default;
    	virtual float operator()(const FloatV& input) const override;
    	virtual int GetArgNum() const override;
      private:
    	const int argnum;
    	const FloatV cf;
    };
    
    #endif
    Functor.cpp
    Code:
    #include "BadInput.hpp"
    #include "Functor.hpp"
    #include <iostream>
    
    Functor::Functor()
    {
    }
    
    Functor::~Functor()
    {
    }
    
    Plane::Plane(const FloatV& val):cf(val), argnum(2)
    {
    }
    
    float Plane::operator() (const FloatV & input) const
    	{ 
    		if (input.size() != argnum)
    		{
    			int val = input.size() - argnum;
    			BadInput error(val);
    			std::cerr << "Number of arguments is " << val << " more than the maximum arguments of two for this function." << std::endl;
    			throw error;
    		}
    		
    		 vector < future < float >>results(input.size());
    		 auto sum =[this, input] (int i)->float { return input[i] * cf[i];
    		 }
    		 for (int i = 0; i < input.size(); i++)
    			 results.push_back(async(sum, i));
    		 float sumofprod =0;
    	   for (auto & result:results)
    			 sumofprod += result.get();
    		 return sumofprod += cf.back();	
    }
    	
    	int Plane::GetArgNum() const
    	{
    		return argnum;
    	}
    Trainer.hpp
    Code:
    #ifndef TRAINER_HPP
    #define TRAINER_HPP
    #include "Typedefs.hpp"
    #include "Functor.hpp"
    
    class Trainer
    {
      public:
    	Trainer() = delete;
    	Trainer(const Functor& function);
    	void GenerateData();
    	FloatV& GetData();
    	int GetAnswer() const;
      private:
    	const Functor& f;
    	FloatV TrainingData;
    	int answer;
    };
    
    #endif
    Trainer.cpp
    Code:
    #include "BadInput.hpp"
    #include "Trainer.hpp"
    #include <future>
    using std::async; 
    
    namespace
    {
    	RNG rand;
    }
    
    Trainer::Trainer(const Functor& function):f(function),
    TrainingData(f.GetArgNum())
    {
    }
    
    void Trainer::GenerateData()
    {
    	auto randomize = [this](int i) {TrainingData[i]=rand();}
    	
        for (int i =0; i < TrainingData.size(); i++)
        async(randomize, TrainingData[i]);	
    	
    	float z;
    	try
    	{
    		z = f(TrainingData);
    	}
    	catch(BadInput&)
    	{
    		throw;
    	}
    	TrainingData.push_back(rand());
    	if (TrainingData.back() < z)
    		answer = -1;
    	else
    		answer = 1;
    }
    
    FloatV& Trainer::GetData()
    {
    	return TrainingData;
    }
    
    int Trainer::GetAnswer() const
    {
    	return answer;
    }
    Perceptron.hpp
    Code:
    #ifndef PERCEPTRON_HPP
    #define PERCEPTRON_HPP
    #include "Typedefs.hpp"
    #include "Trainer.hpp"
    
    class Perceptron
    {
      public:
        Perceptron() = delete;
    	Perceptron(int val);
    	~Perceptron();
    	float feedforward(FloatV& input);
      private:
    	FloatV weights;
    	static const float bias;
    	static const float correction;
    };
    #endif
    Perceptron.cpp
    Code:
    #include "BadInput.hpp"
    #include "Perceptron.hpp"
    #include <iostream>
    #include <cmath>
    #include <future>
    using std::async;
    using std::future;
    using std::exp;
    
    const float Perceptron::bias = 1.0;
    const float Perceptron::correction = 0.01;
    
    namespace
    {
    	UID rand(-1, 1);
        RNG rng;
    }
    
    Perceptron::Perceptron(int val):weights(rand(rng))
    {
    }
    
    Perceptron::~Perceptron()
    {
    }
    
    int Perceptron::feedforward(FloatV& input)
    {
    	float sum = 0;
    	input.push_back(bias);
    	if (input.size() != weights.size())
    	{
    		BadInput error(input.size() - weights.size());
    		std::cerr << "Biased input is " << error.difference <<
    			" value different from the number of weights in the perceptron." <<
    			std::endl;
    		throw error;
    	}
    	
    	vector <future<float>> results;
    	 auto WeightedInput = [this](float x) -> float {return x*weights[i];}
    	for (int i=0; i < input.size(); i++)		   results.push_back(async(WeightedInput, input[i]));
    	
    	float sum=0;
    	for(auto& result: results)
    	sum += result.get();
    	
    	return 1.0/(1.0 + exp(-sum));	
    }
    NeuralNetwork.hpp
    Code:
    #ifndef NEURAL_NETWORK_HPP
    #define NEURAL_NETWORK_HPP
    #include "Trainer.hpp"
    #include "Perceptron.hpp"
    #include "Typedefs.hpp"
    
    class NeuralNetwork
    {
    	public:
    	NeuralNetwork() = delete; 
    	NeuralNetwork(Trainer& trainer);
    	float operator()(FloatV& input);
    	private:
    	void Train(Trainer& trainer);
    	void Classify(FloatV& input);
    	std::vector<Perceptron> InputLayer;
    	std::vector<Perceptron> HiddenLayer;
    	Perceptron Output;
    	FloatV InputWeight;
    	FloatV OutputWeight;
    };
    
    #endif
    I'm having a bit of coder's block right now. Also, I want the vector operations to run in their own threads, as speed is critical.

    Thanks,
    Lexi

  2. #2
    VictorN

    Re: Advice on implementing my ANN class

    1. Does this code compile and run, or not? Or are there some logic or other problems?
    2. If it runs - did you debug it?
    3. Do you know how to start a worker thread? Communicate between threads? Synchronize threads?
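
    For reference, a minimal sketch of starting a worker thread, handing it work and synchronizing on the result (all names here are illustrative, none are taken from your code):
    Code:
    #include <thread>
    #include <mutex>
    #include <vector>
    
    int main()
    {
    	std::vector<float> results;
    	std::mutex results_mutex;	// protects 'results' while the worker runs
    
    	// Start a worker thread that does some work and stores its result.
    	std::thread worker([&results, &results_mutex]
    	{
    		float value = 42.0f;	// placeholder for the real computation
    		std::lock_guard<std::mutex> lock(results_mutex);
    		results.push_back(value);
    	});
    
    	worker.join();	// synchronize: wait for the worker to finish
    	return 0;
    }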
    Victor Nijegorodov

  3. #3

    Re: Advice on implementing my ANN class

    Also, I want the vector operations to run in their own threads, as speed is critical.
    Get it to run on a single thread first, and make sure the program is fully tested and debugged before even thinking about multiple threads.
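
    For example, the weighted sum in feedforward() needs nothing more than a plain loop or std::inner_product to begin with. A single-threaded sketch, assuming the bias term has already been appended to the input:
    Code:
    #include <cmath>
    #include <numeric>
    #include <vector>
    
    // Single-threaded sketch: weighted sum of the inputs followed by the logistic activation.
    float Feedforward(const std::vector<float>& input, const std::vector<float>& weights)
    {
    	float sum = std::inner_product(input.begin(), input.end(), weights.begin(), 0.0f);
    	return 1.0f / (1.0f + std::exp(-sum));
    }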

    Regards,

    Paul McKenzie
