pktools 2.6.7
Processing Kernel for geospatial data
myfann_cpp.h
1#ifndef FANN_CPP_H_INCLUDED
2#define FANN_CPP_H_INCLUDED
3
4/*
5 *
6 * Fast Artificial Neural Network (fann) C++ Wrapper
7 * Copyright (C) 2004-2006 created by freegoldbar (at) yahoo dot com
8 *
9 * This wrapper is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * This wrapper is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25/*
26 * Title: FANN Wrapper for C++
27 *
28 * Overview:
29 *
30 * The Fann Wrapper for C++ provides two classes: <neural_net>
31 * and <training_data>. To use the wrapper include
32 * doublefann.h, floatfann.h or fixedfann.h before the
33 * fann_cpp.h header file. To get started see xor_sample.cpp
34 * in the examples directory. The license is LGPL. Copyright (C)
35 * 2004-2006 created by <freegoldbar@yahoo.com>.
36 *
37 * Note: Notes and differences from C API
38 *
39 * - The Fann Wrapper for C++ is a minimal wrapper without use of
40 * templates or exception handling for efficient use in any environment.
41 * Benefits include stricter type checking, simpler memory
42 * management and possibly code completion in program editor.
43 * - Method names are the same as the function names in the C
44 * API except the fann_ prefix has been removed. Enums in the
45 * namespace are similarly defined without the FANN_ prefix.
46 * - The arguments to the methods are the same as the C API
47 * except that the struct fann *ann/struct fann_train_data *data
48 * arguments are encapsulated so they are not present in the
49 * method signatures or are translated into class references.
50 * - The various create methods return a boolean set to true to
51 * indicate that the neural network was created, false otherwise.
52 * The same goes for the read_train_from_file method.
53 * - The neural network and training data is automatically cleaned
54 * up in the destructors and create/read methods.
55 * - To make the destructors virtual define USE_VIRTUAL_DESTRUCTOR
56 * before including the header file.
57 * - Additional methods are available on the training_data class to
58 * give access to the underlying training data. They are get_input,
59 * get_output and set_train_data. Finally fann_duplicate_train_data
60 * has been replaced by a copy constructor.
61 *
62 * Note: Changes
63 *
64 * Version 2.1.0:
65 * - General update to fann C library 2.1.0 with support for new functionality
66 * - Due to changes in the C API the C++ API is not fully backward compatible:
67 * The create methods have changed names and parameters.
68 * The training callback function has different parameters and a set_callback.
69 * Some <training_data> methods have updated names.
70 * Get activation function and steepness is available for neurons, not layers.
71 * - Extensions are now part of fann so there is no fann_extensions.h
72 *
73 * Version 1.2.0:
74 * - Changed char pointers to const std::string references
75 * - Added const_casts where the C API required it
76 * - Initialized enums from the C enums instead of numeric constants
77 * - Added a method set_train_data that copies and allocates training
78 * - data in a way that is compatible with the way the C API deallocates
79 * - the data thus making it possible to change training data.
80 * - The get_rprop_increase_factor method did not return its value
81 *
82 * Version 1.0.0:
83 * - Initial version
84 *
85 */
86
87#include <iostream>
88#include <stdarg.h>
89#include <string>
90#include <vector>
91#include <cassert>
92#include "base/Vector2d.h"
93
94/* Namespace: FANN
95 The FANN namespace groups the C++ wrapper definitions */
96namespace FANN
97{
98 /* Enum: error_function_enum
99 Error function used during training.
100
101 ERRORFUNC_LINEAR - Standard linear error function.
102 ERRORFUNC_TANH - Tanh error function, usually better
103 but can require a lower learning rate. This error function agressively targets outputs that
104 differ much from the desired, while not targetting outputs that only differ a little that much.
105 This activation function is not recommended for cascade training and incremental training.
106
107 See also:
108 <neural_net::set_train_error_function>, <neural_net::get_train_error_function>
109 */
110 enum error_function_enum {
111 ERRORFUNC_LINEAR = FANN_ERRORFUNC_LINEAR,
112 ERRORFUNC_TANH
113 };
114
115 /* Enum: stop_function_enum
116 Stop criteria used during training.
117
118 STOPFUNC_MSE - Stop criteria is Mean Square Error (MSE) value.
119 STOPFUNC_BIT - Stop criteria is number of bits that fail. The number of bits; means the
120 number of output neurons which differ more than the bit fail limit
121 (see <neural_net::get_bit_fail_limit>, <neural_net::set_bit_fail_limit>).
122 The bits are counted in all of the training data, so this number can be higher than
123 the number of training data.
124
125 See also:
126 <neural_net::set_train_stop_function>, <neural_net::get_train_stop_function>
127 */
128 enum stop_function_enum
129 {
130 STOPFUNC_MSE = FANN_STOPFUNC_MSE,
131 STOPFUNC_BIT
132 };
133
134 /* Enum: training_algorithm_enum
135 The Training algorithms used when training on <training_data> with functions like
136 <neural_net::train_on_data> or <neural_net::train_on_file>. The incremental training
137 looks alters the weights after each time it is presented an input pattern, while batch
138 only alters the weights once after it has been presented to all the patterns.
139
140 TRAIN_INCREMENTAL - Standard backpropagation algorithm, where the weights are
141 updated after each training pattern. This means that the weights are updated many
142 times during a single epoch. For this reason some problems, will train very fast with
143 this algorithm, while other more advanced problems will not train very well.
144 TRAIN_BATCH - Standard backpropagation algorithm, where the weights are updated after
145 calculating the mean square error for the whole training set. This means that the weights
146 are only updated once during a epoch. For this reason some problems, will train slower with
147 this algorithm. But since the mean square error is calculated more correctly than in
148 incremental training, some problems will reach a better solutions with this algorithm.
149 TRAIN_RPROP - A more advanced batch training algorithm which achieves good results
150 for many problems. The RPROP training algorithm is adaptive, and does therefore not
151 use the learning_rate. Some other parameters can however be set to change the way the
152 RPROP algorithm works, but it is only recommended for users with insight in how the RPROP
153 training algorithm works. The RPROP training algorithm is described by
154 [Riedmiller and Braun, 1993], but the actual learning algorithm used here is the
155 iRPROP- training algorithm which is described by [Igel and Husken, 2000] which
156 is an variety of the standard RPROP training algorithm.
157 TRAIN_QUICKPROP - A more advanced batch training algorithm which achieves good results
158 for many problems. The quickprop training algorithm uses the learning_rate parameter
159 along with other more advanced parameters, but it is only recommended to change these
160 advanced parameters, for users with insight in how the quickprop training algorithm works.
161 The quickprop training algorithm is described by [Fahlman, 1988].
162
163 See also:
164 <neural_net::set_training_algorithm>, <neural_net::get_training_algorithm>
165 */
166 enum training_algorithm_enum {
167 TRAIN_INCREMENTAL = FANN_TRAIN_INCREMENTAL,
168 TRAIN_BATCH,
169 TRAIN_RPROP,
170 TRAIN_QUICKPROP
171 };
172
173 /* Enum: activation_function_enum
174
175 The activation functions used for the neurons during training. The activation functions
176 can either be defined for a group of neurons by <neural_net::set_activation_function_hidden>
177 and <neural_net::set_activation_function_output> or it can be defined for a single neuron by
178 <neural_net::set_activation_function>.
179
180 The steepness of an activation function is defined in the same way by
181 <neural_net::set_activation_steepness_hidden>, <neural_net::set_activation_steepness_output>
182 and <neural_net::set_activation_steepness>.
183
184 The functions are described with functions where:
185 * x is the input to the activation function,
186 * y is the output,
187 * s is the steepness and
188 * d is the derivation.
189
190 FANN_LINEAR - Linear activation function.
191 * span: -inf < y < inf
192 * y = x*s, d = 1*s
193 * Can NOT be used in fixed point.
194
195 FANN_THRESHOLD - Threshold activation function.
196 * x < 0 -> y = 0, x >= 0 -> y = 1
197 * Can NOT be used during training.
198
199 FANN_THRESHOLD_SYMMETRIC - Threshold activation function.
200 * x < 0 -> y = 0, x >= 0 -> y = 1
201 * Can NOT be used during training.
202
203 FANN_SIGMOID - Sigmoid activation function.
204 * One of the most used activation functions.
205 * span: 0 < y < 1
206 * y = 1/(1 + exp(-2*s*x))
207 * d = 2*s*y*(1 - y)
208
209 FANN_SIGMOID_STEPWISE - Stepwise linear approximation to sigmoid.
210 * Faster than sigmoid but a bit less precise.
211
212 FANN_SIGMOID_SYMMETRIC - Symmetric sigmoid activation function, aka. tanh.
213 * One of the most used activation functions.
214 * span: -1 < y < 1
215 * y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1
216 * d = s*(1-(y*y))
217
218 FANN_SIGMOID_SYMMETRIC - Stepwise linear approximation to symmetric sigmoid.
219 * Faster than symmetric sigmoid but a bit less precise.
220
221 FANN_GAUSSIAN - Gaussian activation function.
222 * 0 when x = -inf, 1 when x = 0 and 0 when x = inf
223 * span: 0 < y < 1
224 * y = exp(-x*s*x*s)
225 * d = -2*x*s*y*s
226
227 FANN_GAUSSIAN_SYMMETRIC - Symmetric gaussian activation function.
228 * -1 when x = -inf, 1 when x = 0 and 0 when x = inf
229 * span: -1 < y < 1
230 * y = exp(-x*s*x*s)*2-1
231 * d = -2*x*s*(y+1)*s
232
233 FANN_ELLIOT - Fast (sigmoid like) activation function defined by David Elliott
234 * span: 0 < y < 1
235 * y = ((x*s) / 2) / (1 + |x*s|) + 0.5
236 * d = s*1/(2*(1+|x*s|)*(1+|x*s|))
237
238 FANN_ELLIOT_SYMMETRIC - Fast (symmetric sigmoid like) activation function defined by David Elliott
239 * span: -1 < y < 1
240 * y = (x*s) / (1 + |x*s|)
241 * d = s*1/((1+|x*s|)*(1+|x*s|))
242
243 FANN_LINEAR_PIECE - Bounded linear activation function.
244 * span: 0 < y < 1
245 * y = x*s, d = 1*s
246
247 FANN_LINEAR_PIECE_SYMMETRIC - Bounded Linear activation function.
248 * span: -1 < y < 1
249 * y = x*s, d = 1*s
250
251 FANN_SIN_SYMMETRIC - Periodical sinus activation function.
252 * span: -1 <= y <= 1
253 * y = sin(x*s)
254 * d = s*cos(x*s)
255
256 FANN_COS_SYMMETRIC - Periodical cosinus activation function.
257 * span: -1 <= y <= 1
258 * y = cos(x*s)
259 * d = s*-sin(x*s)
260
261 See also:
262 <neural_net::set_activation_function_hidden>,
263 <neural_net::set_activation_function_output>
264 */
265 enum activation_function_enum {
266 LINEAR = FANN_LINEAR,
267 THRESHOLD,
268 THRESHOLD_SYMMETRIC,
269 SIGMOID,
270 SIGMOID_STEPWISE,
271 SIGMOID_SYMMETRIC,
272 SIGMOID_SYMMETRIC_STEPWISE,
273 GAUSSIAN,
274 GAUSSIAN_SYMMETRIC,
275 GAUSSIAN_STEPWISE,
276 ELLIOT,
277 ELLIOT_SYMMETRIC,
278 LINEAR_PIECE,
279 LINEAR_PIECE_SYMMETRIC,
280 SIN_SYMMETRIC,
281 COS_SYMMETRIC
282 };
283
284 /* Enum: network_type_enum
285
286 Definition of network types used by <neural_net::get_network_type>
287
288 LAYER - Each layer only has connections to the next layer
289 SHORTCUT - Each layer has connections to all following layers
290
291 See Also:
292 <neural_net::get_network_type>, <fann_get_network_type>
293
294 This enumeration appears in FANN >= 2.1.0
295 */
296 enum network_type_enum
297 {
298 LAYER = FANN_NETTYPE_LAYER,
299 SHORTCUT
300 };
301
302 /* Type: connection
303
304 Describes a connection between two neurons and its weight
305
306 from_neuron - Unique number used to identify source neuron
307 to_neuron - Unique number used to identify destination neuron
308 weight - The numerical value of the weight
309
310 See Also:
311 <neural_net::get_connection_array>, <neural_net::set_weight_array>
312
313 This structure appears in FANN >= 2.1.0
314 */
315 typedef struct fann_connection connection;
316
317 /* Forward declaration of class neural_net and training_data */
318 class neural_net;
319 class training_data;
320
321 /* Type: callback_type
322 This callback function can be called during training when using <neural_net::train_on_data>,
323 <neural_net::train_on_file> or <neural_net::cascadetrain_on_data>.
324
325 >typedef int (*callback_type) (neural_net &net, training_data &train,
326 > unsigned int max_epochs, unsigned int epochs_between_reports,
327 > float desired_error, unsigned int epochs, void *user_data);
328
329 The callback can be set by using <neural_net::set_callback> and is very usefull for doing custom
330 things during training. It is recommended to use this function when implementing custom
331 training procedures, or when visualizing the training in a GUI etc. The parameters which the
332 callback function takes is the parameters given to the <neural_net::train_on_data>, plus an epochs
333 parameter which tells how many epochs the training have taken so far.
334
335 The callback function should return an integer, if the callback function returns -1, the training
336 will terminate.
337
338 Example of a callback function that prints information to cout:
339 >int print_callback(FANN::neural_net &net, FANN::training_data &train,
340 > unsigned int max_epochs, unsigned int epochs_between_reports,
341 > float desired_error, unsigned int epochs, void *user_data)
342 >{
343 > cout << "Epochs " << setw(8) << epochs << ". "
344 > << "Current Error: " << left << net.get_MSE() << right << endl;
345 > return 0;
346 >}
347
348 See also:
349 <neural_net::set_callback>, <fann_callback_type>
350 */
351 typedef int (*callback_type) (neural_net &net, training_data &train,
352 unsigned int max_epochs, unsigned int epochs_between_reports,
353 float desired_error, unsigned int epochs, void *user_data);
354
355 /*************************************************************************/
356
357 /* Class: training_data
358
359 Encapsulation of a training data set <struct fann_train_data> and
360 associated C API functions.
361 */
363 {
364 public:
365 /* Constructor: training_data
366
367 Default constructor creates an empty neural net.
368 Use <read_train_from_file>, <set_train_data> or <create_train_from_callback> to initialize.
369 */
370 training_data() : train_data(NULL)
371 {
372 }
373
374 /* Constructor: training_data
375
376 Copy constructor constructs a copy of the training data.
377 Corresponds to the C API <fann_duplicate_train_data> function.
378 */
379 training_data(const training_data &data)
380 {
381 destroy_train();
382 if (data.train_data != NULL)
383 {
384 train_data = fann_duplicate_train_data(data.train_data);
385 }
386 }
387
388 /* Destructor: ~training_data
389
390 Provides automatic cleanup of data.
391 Define USE_VIRTUAL_DESTRUCTOR if you need the destructor to be virtual.
392
393 See also:
394 <destroy>
395 */
396#ifdef USE_VIRTUAL_DESTRUCTOR
397 virtual
398#endif
400 {
401 destroy_train();
402 }
403
404 /* Method: destroy
405
406 Destructs the training data. Called automatically by the destructor.
407
408 See also:
409 <~training_data>
410 */
411 void destroy_train()
412 {
413 if (train_data != NULL)
414 {
415 fann_destroy_train(train_data);
416 train_data = NULL;
417 }
418 }
419
420 /* Method: read_train_from_file
421 Reads a file that stores training data.
422
423 The file must be formatted like:
424 >num_train_data num_input num_output
425 >inputdata seperated by space
426 >outputdata seperated by space
427 >
428 >.
429 >.
430 >.
431 >
432 >inputdata seperated by space
433 >outputdata seperated by space
434
435 See also:
436 <neural_net::train_on_data>, <save_train>, <fann_read_train_from_file>
437
438 This function appears in FANN >= 1.0.0
439 */
440 bool read_train_from_file(const std::string &filename)
441 {
442 destroy_train();
443 train_data = fann_read_train_from_file(filename.c_str());
444 return (train_data != NULL);
445 }
446
447 /* Method: save_train
448
449 Save the training structure to a file, with the format as specified in <read_train_from_file>
450
451 Return:
452 The function returns true on success and false on failure.
453
454 See also:
455 <read_train_from_file>, <save_train_to_fixed>, <fann_save_train>
456
457 This function appears in FANN >= 1.0.0.
458 */
459 bool save_train(const std::string &filename)
460 {
461 if (train_data == NULL)
462 {
463 return false;
464 }
465 if (fann_save_train(train_data, filename.c_str()) == -1)
466 {
467 return false;
468 }
469 return true;
470 }
471
472 /* Method: save_train_to_fixed
473
474 Saves the training structure to a fixed point data file.
475
476 This function is very usefull for testing the quality of a fixed point network.
477
478 Return:
479 The function returns true on success and false on failure.
480
481 See also:
482 <save_train>, <fann_save_train_to_fixed>
483
484 This function appears in FANN >= 1.0.0.
485 */
486 bool save_train_to_fixed(const std::string &filename, unsigned int decimal_point)
487 {
488 if (train_data == NULL)
489 {
490 return false;
491 }
492 if (fann_save_train_to_fixed(train_data, filename.c_str(), decimal_point) == -1)
493 {
494 return false;
495 }
496 return true;
497 }
498
499 /* Method: shuffle_train_data
500
501 Shuffles training data, randomizing the order.
502 This is recommended for incremental training, while it have no influence during batch training.
503
504 This function appears in FANN >= 1.1.0.
505 */
506 void shuffle_train_data()
507 {
508 if (train_data != NULL)
509 {
510 fann_shuffle_train_data(train_data);
511 }
512 }
513
514 /* Method: merge_train_data
515
516 Merges the data into the data contained in the <training_data>.
517
518 This function appears in FANN >= 1.1.0.
519 */
520 void merge_train_data(const training_data &data)
521 {
522 fann_train_data *new_data = fann_merge_train_data(train_data, data.train_data);
523 if (new_data != NULL)
524 {
525 destroy_train();
526 train_data = new_data;
527 }
528 }
529
530 /* Method: length_train_data
531
532 Returns the number of training patterns in the <training_data>.
533
534 See also:
535 <num_input_train_data>, <num_output_train_data>, <fann_length_train_data>
536
537 This function appears in FANN >= 2.0.0.
538 */
539 unsigned int length_train_data()
540 {
541 if (train_data == NULL)
542 {
543 return 0;
544 }
545 else
546 {
547 return fann_length_train_data(train_data);
548 }
549 }
550
551 /* Method: num_input_train_data
552
553 Returns the number of inputs in each of the training patterns in the <training_data>.
554
555 See also:
556 <num_output_train_data>, <length_train_data>, <fann_num_input_train_data>
557
558 This function appears in FANN >= 2.0.0.
559 */
560 unsigned int num_input_train_data()
561 {
562 if (train_data == NULL)
563 {
564 return 0;
565 }
566 else
567 {
568 return fann_num_input_train_data(train_data);
569 }
570 }
571
572 /* Method: num_output_train_data
573
574 Returns the number of outputs in each of the training patterns in the <struct fann_train_data>.
575
576 See also:
577 <num_input_train_data>, <length_train_data>, <fann_num_output_train_data>
578
579 This function appears in FANN >= 2.0.0.
580 */
581 unsigned int num_output_train_data()
582 {
583 if (train_data == NULL)
584 {
585 return 0;
586 }
587 else
588 {
589 return fann_num_output_train_data(train_data);
590 }
591 }
592
593 /* Grant access to the encapsulated data since many situations
594 and applications creates the data from sources other than files
595 or uses the training data for testing and related functions */
596
597 /* Method: get_input
598
599 Returns:
600 A pointer to the array of input training data
601
602 See also:
603 <get_output>, <set_train_data>
604 */
605 fann_type **get_input()
606 {
607 if (train_data == NULL)
608 {
609 return NULL;
610 }
611 else
612 {
613 return train_data->input;
614 }
615 }
616
617 /* Method: get_output
618
619 Returns:
620 A pointer to the array of output training data
621
622 See also:
623 <get_input>, <set_train_data>
624 */
625 fann_type **get_output()
626 {
627 if (train_data == NULL)
628 {
629 return NULL;
630 }
631 else
632 {
633 return train_data->output;
634 }
635 }
636
637 /* Method: set_train_data
638
639 Set the training data to the input and output data provided.
640
641 A copy of the data is made so there are no restrictions on the
642 allocation of the input/output data and the caller is responsible
643 for the deallocation of the data pointed to by input and output.
644
645 Parameters:
646 num_data - The number of training data
647 num_input - The number of inputs per training data
648 num_output - The number of outputs per training data
649 input - The set of inputs (a pointer to an array of pointers to arrays of floating point data)
650 output - The set of desired outputs (a pointer to an array of pointers to arrays of floating point data)
651
652 See also:
653 <get_input>, <get_output>
654 */
655 void set_train_data(unsigned int num_data,
656 unsigned int num_input, fann_type **input,
657 unsigned int num_output, fann_type **output)
658 {
659 // Uses the allocation method used in fann
660 struct fann_train_data *data =
661 (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
662 data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
663 data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
664
665 data->num_data = num_data;
666 data->num_input = num_input;
667 data->num_output = num_output;
668
669 fann_type *data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
670 fann_type *data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));
671
672 for (unsigned int i = 0; i < num_data; ++i)
673 {
674 data->input[i] = data_input;
675 data_input += num_input;
676 for (unsigned int j = 0; j < num_input; ++j)
677 {
678 data->input[i][j] = input[i][j];
679 }
680 data->output[i] = data_output;
681 data_output += num_output;
682 for (unsigned int j = 0; j < num_output; ++j)
683 {
684 data->output[i][j] = output[i][j];
685 }
686 }
687 set_train_data(data);
688 }
689
690
691
692 void set_train_data(const std::vector< std::vector<fann_type> >& input,
693 const std::vector< std::vector<fann_type> >& output)
694 {
695 unsigned int num_data=input.size();
696 assert(num_data);
697 assert(input.size()==output.size());
698 unsigned int num_input=input[0].size();
699 unsigned int num_output=output[0].size();
700
701 // Uses the allocation method used in fann
702 struct fann_train_data *data =
703 (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
704 data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
705 data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
706
707 data->num_data = num_data;
708 data->num_input = num_input;
709 data->num_output = num_output;
710
711 fann_type *data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
712 fann_type *data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));
713
714 for (unsigned int i = 0; i < num_data; ++i)
715 {
716 data->input[i] = data_input;
717 data_input += num_input;
718 for (unsigned int j = 0; j < num_input; ++j)
719 {
720 data->input[i][j] = input[i][j];
721 }
722 data->output[i] = data_output;
723 data_output += num_output;
724 for (unsigned int j = 0; j < num_output; ++j)
725 {
726 data->output[i][j] = output[i][j];
727 }
728 }
729 set_train_data(data);
730 }
731
732 void set_train_data(const std::vector< std::vector< std::vector<fann_type> > >& input, unsigned int num_data)
733 {
734 assert(num_data);
735 assert(input.size());
736 unsigned int num_class=input.size();
737 assert(input[0].size());
738 unsigned int num_input=input[0][0].size();
739 unsigned int num_output=num_class;
740 //test
741// unsigned int num_output=1;
742
743 // Uses the allocation method used in fann
744 struct fann_train_data *data =
745 (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
746 data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
747 data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
748
749 data->num_data = num_data;
750 data->num_input = num_input;
751 data->num_output = num_output;
752
753 fann_type *data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
754 fann_type *data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));
755
756 unsigned int isample=0;
757 for(int iclass=0;iclass<num_class;++iclass){
758 for(int csample=0;csample<input[iclass].size();++csample){
759 data->input[isample] = data_input;
760 data_input += num_input;
761 for(int iband=0;iband<input[iclass][csample].size();++iband){
762 assert(input[iclass][csample].size()==num_input);
763 data->input[isample][iband] = input[iclass][csample][iband];
764 }
765 data->output[isample] = data_output;
766 data_output += num_output;
767 for(int ic=0;ic<num_output;++ic){
768 //for single neuron output:
769// data->output[isample][ic]=2.0/(num_class-1)*(iclass-(num_class-1)/2.0);
770 if(ic==iclass)
771 data->output[isample][ic] = 1;
772 else
773 data->output[isample][ic] = -1;
774 }
775 ++isample;
776 }
777 }
778 set_train_data(data);
779 }
780
781 void set_train_data(const std::vector< Vector2d<fann_type> >& input, unsigned int num_data)
782 {
783 assert(num_data);
784 assert(input.size());
785 unsigned int num_class=input.size();
786 assert(input[0].size());
787 unsigned int num_input=input[0][0].size();
788 unsigned int num_output=num_class;
789 //test
790// unsigned int num_output=1;
791
792 // Uses the allocation method used in fann
793 struct fann_train_data *data =
794 (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
795 data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
796 data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
797
798 data->num_data = num_data;
799 data->num_input = num_input;
800 data->num_output = num_output;
801
802 fann_type *data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
803 fann_type *data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));
804
805 unsigned int isample=0;
806 for(int iclass=0;iclass<num_class;++iclass){
807 for(int csample=0;csample<input[iclass].size();++csample){
808 data->input[isample] = data_input;
809 data_input += num_input;
810 for(int iband=0;iband<input[iclass][csample].size();++iband){
811 assert(input[iclass][csample].size()==num_input);
812 data->input[isample][iband] = input[iclass][csample][iband];
813 }
814 data->output[isample] = data_output;
815 data_output += num_output;
816 for(int ic=0;ic<num_output;++ic){
817 //for single neuron output:
818// data->output[isample][ic]=2.0/(num_class-1)*(iclass-(num_class-1)/2.0);
819 if(ic==iclass)
820 data->output[isample][ic] = 1;
821 else
822 data->output[isample][ic] = -1;
823 }
824 ++isample;
825 }
826 }
827 set_train_data(data);
828 }
829
830
831private:
832 /* Set the training data to the struct fann_training_data pointer.
833 The struct has to be allocated with malloc to be compatible
834 with fann_destroy. */
835 void set_train_data(struct fann_train_data *data)
836 {
837 destroy_train();
838 train_data = data;
839 }
840
841public:
842 /*********************************************************************/
843
844 /* Method: create_train_from_callback
845 Creates the training data struct from a user supplied function.
846 As the training data are numerable (data 1, data 2...), the user must write
847 a function that receives the number of the training data set (input,output)
848 and returns the set.
849
850 Parameters:
851 num_data - The number of training data
852 num_input - The number of inputs per training data
853 num_output - The number of outputs per training data
854 user_function - The user suplied function
855
856 Parameters for the user function:
857 num - The number of the training data set
858 num_input - The number of inputs per training data
859 num_output - The number of outputs per training data
860 input - The set of inputs
861 output - The set of desired outputs
862
863 See also:
864 <training_data::read_train_from_file>, <neural_net::train_on_data>,
865 <fann_create_train_from_callback>
866
867 This function appears in FANN >= 2.1.0
868 */
869 void create_train_from_callback(unsigned int num_data,
870 unsigned int num_input,
871 unsigned int num_output,
872 void (FANN_API *user_function)( unsigned int,
873 unsigned int,
874 unsigned int,
875 fann_type * ,
876 fann_type * ))
877 {
878 destroy_train();
879 train_data = fann_create_train_from_callback(num_data, num_input, num_output, user_function);
880 }
881
882 /* Method: scale_input_train_data
883
884 Scales the inputs in the training data to the specified range.
885
886 See also:
887 <scale_output_train_data>, <scale_train_data>, <fann_scale_input_train_data>
888
889 This function appears in FANN >= 2.0.0.
890 */
891 void scale_input_train_data(fann_type new_min, fann_type new_max)
892 {
893 if (train_data != NULL)
894 {
895 fann_scale_input_train_data(train_data, new_min, new_max);
896 }
897 }
898
899 /* Method: scale_output_train_data
900
901 Scales the outputs in the training data to the specified range.
902
903 See also:
904 <scale_input_train_data>, <scale_train_data>, <fann_scale_output_train_data>
905
906 This function appears in FANN >= 2.0.0.
907 */
908 void scale_output_train_data(fann_type new_min, fann_type new_max)
909 {
910 if (train_data != NULL)
911 {
912 fann_scale_output_train_data(train_data, new_min, new_max);
913 }
914 }
915
916 /* Method: scale_train_data
917
918 Scales the inputs and outputs in the training data to the specified range.
919
920 See also:
921 <scale_output_train_data>, <scale_input_train_data>, <fann_scale_train_data>
922
923 This function appears in FANN >= 2.0.0.
924 */
925 void scale_train_data(fann_type new_min, fann_type new_max)
926 {
927 if (train_data != NULL)
928 {
929 fann_scale_train_data(train_data, new_min, new_max);
930 }
931 }
932
933 /* Method: subset_train_data
934
935 Changes the training data to a subset, starting at position *pos*
936 and *length* elements forward. Use the copy constructor to work
937 on a new copy of the training data.
938
939 >FANN::training_data full_data_set;
940 >full_data_set.read_train_from_file("somefile.train");
941 >FANN::training_data *small_data_set = new FANN::training_data(full_data_set);
942 >small_data_set->subset_train_data(0, 2); // Only use first two
943 >// Use small_data_set ...
944 >delete small_data_set;
945
946 See also:
947 <fann_subset_train_data>
948
949 This function appears in FANN >= 2.0.0.
950 */
951 void subset_train_data(unsigned int pos, unsigned int length)
952 {
953 if (train_data != NULL)
954 {
955 struct fann_train_data *temp = fann_subset_train_data(train_data, pos, length);
956 destroy_train();
957 train_data = temp;
958 }
959 }
960
961 /*********************************************************************/
962
963 protected:
964 /* The neural_net class has direct access to the training data */
965 friend class neural_net;
966
967 /* Pointer to the encapsulated training data */
968 struct fann_train_data* train_data;
969 };
970
971 /*************************************************************************/
972
973 /* Class: neural_net
974
975 Encapsulation of a neural network <struct fann> and
976 associated C API functions.
977 */
979 {
980 public:
981 /* Constructor: neural_net
982
983 Default constructor creates an empty neural net.
984 Use one of the create functions to create the neural network.
985
986 See also:
987 <create_standard>, <create_sparse>, <create_shortcut>,
988 <create_standard_array>, <create_sparse_array>, <create_shortcut_array>
989 */
990 neural_net() : ann(NULL)
991 {
992 }
993
        /* Destructor: ~neural_net

            Provides automatic cleanup of the wrapped network (via <destroy>).
            Define USE_VIRTUAL_DESTRUCTOR if you need the destructor to be
            virtual, e.g. when deriving from this class and deleting through
            a base pointer.

            See also:
                <destroy>
        */
#ifdef USE_VIRTUAL_DESTRUCTOR
        virtual
#endif
        ~neural_net()
        {
            destroy();
        }
1009
1010 /* Method: destroy
1011
1012 Destructs the entire network. Called automatically by the destructor.
1013
1014 See also:
1015 <~neural_net>
1016 */
1017 void destroy()
1018 {
1019 if (ann != NULL)
1020 {
1021 user_context *user_data = static_cast<user_context *>(fann_get_user_data(ann));
1022 if (user_data != NULL)
1023 delete user_data;
1024
1025 fann_destroy(ann);
1026 ann = NULL;
1027 }
1028 }
1029
1030 /* Method: create_standard
1031
1032 Creates a standard fully connected backpropagation neural network.
1033
1034 There will be a bias neuron in each layer (except the output layer),
1035 and this bias neuron will be connected to all neurons in the next layer.
1036 When running the network, the bias nodes always emits 1.
1037
1038 Parameters:
1039 num_layers - The total number of layers including the input and the output layer.
1040 ... - Integer values determining the number of neurons in each layer starting with the
1041 input layer and ending with the output layer.
1042
1043 Returns:
1044 Boolean true if the network was created, false otherwise.
1045
1046 Example:
1047 >const unsigned int num_layers = 3;
1048 >const unsigned int num_input = 2;
1049 >const unsigned int num_hidden = 3;
1050 >const unsigned int num_output = 1;
1051 >
1052 >FANN::neural_net net;
1053 >net.create_standard(num_layers, num_input, num_hidden, num_output);
1054
1055 See also:
1056 <create_standard_array>, <create_sparse>, <create_shortcut>,
1057 <fann_create_standard_array>
1058
1059 This function appears in FANN >= 2.0.0.
1060 */
1061 bool create_standard(unsigned int num_layers, ...)
1062 {
1063 va_list layers;
1064 std::vector<unsigned int> arr(num_layers);//pk
1065 //unsigned int arr[num_layers];
1066
1067 va_start(layers, num_layers);
1068 /* bool status = create_standard_array(num_layers, */
1069 /* reinterpret_cast<const unsigned int *>(layers)); */
1070 for (unsigned int ii = 0; ii < num_layers; ii++)
1071 arr[ii] = va_arg(layers, unsigned int);
1072 bool status = create_standard_array(num_layers, &(arr[0]));//pk
1073 //bool status = create_standard_array(num_layers, arr);
1074 va_end(layers);
1075 return status;
1076 }
1077
1078 /* Method: create_standard_array
1079
1080 Just like <create_standard>, but with an array of layer sizes
1081 instead of individual parameters.
1082
1083 See also:
1084 <create_standard>, <create_sparse>, <create_shortcut>,
1085 <fann_create_standard>
1086
1087 This function appears in FANN >= 2.0.0.
1088 */
1089 bool create_standard_array(unsigned int num_layers, const unsigned int * layers)
1090 {
1091 destroy();
1092 ann = fann_create_standard_array(num_layers, layers);
1093 return (ann != NULL);
1094 }
1095
1096 /* Method: create_sparse
1097
1098 Creates a standard backpropagation neural network, which is not fully connected.
1099
1100 Parameters:
1101 connection_rate - The connection rate controls how many connections there will be in the
1102 network. If the connection rate is set to 1, the network will be fully
1103 connected, but if it is set to 0.5 only half of the connections will be set.
1104 A connection rate of 1 will yield the same result as <fann_create_standard>
1105 num_layers - The total number of layers including the input and the output layer.
1106 ... - Integer values determining the number of neurons in each layer starting with the
1107 input layer and ending with the output layer.
1108
1109 Returns:
1110 Boolean true if the network was created, false otherwise.
1111
1112 See also:
1113 <create_standard>, <create_sparse_array>, <create_shortcut>,
1114 <fann_create_sparse>
1115
1116 This function appears in FANN >= 2.0.0.
1117 */
1118 bool create_sparse(float connection_rate, unsigned int num_layers, ...)
1119 {
1120 va_list layers;
1121 std::vector<unsigned int> arr(num_layers);//pk
1122 //unsigned int arr[num_layers];
1123
1124 va_start(layers, num_layers);
1125 /* bool status = create_sparse_array(connection_rate, num_layers, */
1126 /* reinterpret_cast<const unsigned int *>(layers)); */
1127
1128 for (unsigned int ii = 0; ii < num_layers; ii++)
1129 arr[ii] = va_arg(layers, unsigned int);
1130 bool status = create_sparse_array(connection_rate, num_layers, &(arr[0]));//pk
1131 //bool status = create_sparse_array(connection_rate, num_layers, arr);
1132
1133 va_end(layers);
1134 return status;
1135 }
1136
1137 /* Method: create_sparse_array
1138 Just like <create_sparse>, but with an array of layer sizes
1139 instead of individual parameters.
1140
1141 See <create_sparse> for a description of the parameters.
1142
1143 See also:
1144 <create_standard>, <create_sparse>, <create_shortcut>,
1145 <fann_create_sparse_array>
1146
1147 This function appears in FANN >= 2.0.0.
1148 */
1149 bool create_sparse_array(float connection_rate,
1150 unsigned int num_layers, const unsigned int * layers)
1151 {
1152 destroy();
1153 ann = fann_create_sparse_array(connection_rate, num_layers, layers);
1154 return (ann != NULL);
1155 }
1156
1157 /* Method: create_shortcut
1158
1159 Creates a standard backpropagation neural network, which is not fully connected and which
1160 also has shortcut connections.
1161
1162 Shortcut connections are connections that skip layers. A fully connected network with shortcut
1163 connections, is a network where all neurons are connected to all neurons in later layers.
1164 Including direct connections from the input layer to the output layer.
1165
1166 See <create_standard> for a description of the parameters.
1167
1168 See also:
1169 <create_standard>, <create_sparse>, <create_shortcut_array>,
1170 <fann_create_shortcut>
1171
1172 This function appears in FANN >= 2.0.0.
1173 */
1174 bool create_shortcut(unsigned int num_layers, ...)
1175 {
1176 va_list layers;
1177 std::vector<unsigned int> arr(num_layers);//pk
1178 //unsigned int arr[num_layers];
1179
1180 va_start(layers, num_layers);
1181 /* bool status = create_shortcut_array(num_layers, */
1182 /* reinterpret_cast<const unsigned int *>(layers)); */
1183 for (unsigned int ii = 0; ii < num_layers; ii++)
1184 arr[ii] = va_arg(layers, unsigned int);
1185 bool status = create_shortcut_array(num_layers, &(arr[0]));//
1186 //bool status = create_shortcut_array(num_layers, arr);
1187
1188 va_end(layers);
1189 return status;
1190 }
1191
1192 /* Method: create_shortcut_array
1193
1194 Just like <create_shortcut>, but with an array of layer sizes
1195 instead of individual parameters.
1196
1197 See <create_standard_array> for a description of the parameters.
1198
1199 See also:
1200 <create_standard>, <create_sparse>, <create_shortcut>,
1201 <fann_create_shortcut_array>
1202
1203 This function appears in FANN >= 2.0.0.
1204 */
1205 bool create_shortcut_array(unsigned int num_layers,
1206 const unsigned int * layers)
1207 {
1208 destroy();
1209 ann = fann_create_shortcut_array(num_layers, layers);
1210 return (ann != NULL);
1211 }
1212
1213 /* Method: run
1214
1215 Will run input through the neural network, returning an array of outputs, the number of which being
1216 equal to the number of neurons in the output layer.
1217
1218 See also:
1219 <test>, <fann_run>
1220
1221 This function appears in FANN >= 1.0.0.
1222 */
1223 fann_type* run(fann_type *input)
1224 {
1225 if (ann == NULL)
1226 {
1227 return NULL;
1228 }
1229// for(int iband=0;iband<get_num_input();++iband)
1230// std::cout << (input)[iband] << " ";
1231// std::cout << std::endl;
1232 return fann_run(ann, input);
1233 }
1234
1235 std::vector<fann_type> run(std::vector<fann_type> input)
1236 {
1237 std::vector<fann_type> vresult;
1238 if (ann == NULL)
1239 {
1240 return vresult;
1241 }
1242 vresult.resize(get_num_output());
1243 fann_type* result;
1244 result=fann_run(ann,&(input[0]));
1245 for(int iout=0;iout<get_num_output();++iout)
1246 vresult[iout]=*(result++);
1247 return vresult;
1248 }
1249
1250 /* Method: randomize_weights
1251
1252 Give each connection a random weight between *min_weight* and *max_weight*
1253
1254 From the beginning the weights are random between -0.1 and 0.1.
1255
1256 See also:
1257 <init_weights>, <fann_randomize_weights>
1258
1259 This function appears in FANN >= 1.0.0.
1260 */
1261 void randomize_weights(fann_type min_weight, fann_type max_weight)
1262 {
1263 if (ann != NULL)
1264 {
1265 fann_randomize_weights(ann, min_weight, max_weight);
1266 }
1267 }
1268
1269 /* Method: init_weights
1270
1271 Initialize the weights using Widrow + Nguyen's algorithm.
1272
1273 This function behaves similarly to fann_randomize_weights. It will use the algorithm developed
1274 by Derrick Nguyen and Bernard Widrow to set the weights in such a way
1275 as to speed up training. This technique is not always successful, and in some cases can be less
1276 efficient than a purely random initialization.
1277
1278 The algorithm requires access to the range of the input data (ie, largest and smallest input),
1279 and therefore accepts a second argument, data, which is the training data that will be used to
1280 train the network.
1281
1282 See also:
1283 <randomize_weights>, <training_data::read_train_from_file>,
1284 <fann_init_weights>
1285
1286 This function appears in FANN >= 1.1.0.
1287 */
1288 void init_weights(const training_data &data)
1289 {
1290 if ((ann != NULL) && (data.train_data != NULL))
1291 {
1292 fann_init_weights(ann, data.train_data);
1293 }
1294 }
1295
1296 /* Method: print_connections
1297
1298 Will print the connections of the ann in a compact matrix, for easy viewing of the internals
1299 of the ann.
1300
1301 The output from fann_print_connections on a small (2 2 1) network trained on the xor problem
1302 >Layer / Neuron 012345
1303 >L 1 / N 3 BBa...
1304 >L 1 / N 4 BBA...
1305 >L 1 / N 5 ......
1306 >L 2 / N 6 ...BBA
1307 >L 2 / N 7 ......
1308
1309 This network have five real neurons and two bias neurons. This gives a total of seven neurons
1310 named from 0 to 6. The connections between these neurons can be seen in the matrix. "." is a
1311 place where there is no connection, while a character tells how strong the connection is on a
1312 scale from a-z. The two real neurons in the hidden layer (neuron 3 and 4 in layer 1) has
1313 connection from the three neurons in the previous layer as is visible in the first two lines.
1314 The output neuron (6) has connections form the three neurons in the hidden layer 3 - 5 as is
1315 visible in the fourth line.
1316
1317 To simplify the matrix output neurons is not visible as neurons that connections can come from,
1318 and input and bias neurons are not visible as neurons that connections can go to.
1319
1320 This function appears in FANN >= 1.2.0.
1321 */
1322 void print_connections()
1323 {
1324 if (ann != NULL)
1325 {
1326 fann_print_connections(ann);
1327 }
1328 }
1329
1330 /* Method: create_from_file
1331
1332 Constructs a backpropagation neural network from a configuration file,
1333 which have been saved by <save>.
1334
1335 See also:
1336 <save>, <save_to_fixed>, <fann_create_from_file>
1337
1338 This function appears in FANN >= 1.0.0.
1339 */
1340 bool create_from_file(const std::string &configuration_file)
1341 {
1342 destroy();
1343 ann = fann_create_from_file(configuration_file.c_str());
1344 return (ann != NULL);
1345 }
1346
1347 /* Method: save
1348
1349 Save the entire network to a configuration file.
1350
1351 The configuration file contains all information about the neural network and enables
1352 <create_from_file> to create an exact copy of the neural network and all of the
1353 parameters associated with the neural network.
1354
1355 These two parameters (<set_callback>, <set_error_log>) are *NOT* saved
1356 to the file because they cannot safely be ported to a different location. Also temporary
1357 parameters generated during training like <get_MSE> is not saved.
1358
1359 Return:
1360 The function returns 0 on success and -1 on failure.
1361
1362 See also:
1363 <create_from_file>, <save_to_fixed>, <fann_save>
1364
1365 This function appears in FANN >= 1.0.0.
1366 */
1367 bool save(const std::string &configuration_file)
1368 {
1369 if (ann == NULL)
1370 {
1371 return false;
1372 }
1373 if (fann_save(ann, configuration_file.c_str()) == -1)
1374 {
1375 return false;
1376 }
1377 return true;
1378 }
1379
1380 /* Method: save_to_fixed
1381
1382 Saves the entire network to a configuration file.
1383 But it is saved in fixed point format no matter which
1384 format it is currently in.
1385
1386 This is usefull for training a network in floating points,
1387 and then later executing it in fixed point.
1388
1389 The function returns the bit position of the fix point, which
1390 can be used to find out how accurate the fixed point network will be.
1391 A high value indicates high precision, and a low value indicates low
1392 precision.
1393
1394 A negative value indicates very low precision, and a very
1395 strong possibility for overflow.
1396 (the actual fix point will be set to 0, since a negative
1397 fix point does not make sence).
1398
1399 Generally, a fix point lower than 6 is bad, and should be avoided.
1400 The best way to avoid this, is to have less connections to each neuron,
1401 or just less neurons in each layer.
1402
1403 The fixed point use of this network is only intended for use on machines that
1404 have no floating point processor, like an iPAQ. On normal computers the floating
1405 point version is actually faster.
1406
1407 See also:
1408 <create_from_file>, <save>, <fann_save_to_fixed>
1409
1410 This function appears in FANN >= 1.0.0.
1411 */
1412 int save_to_fixed(const std::string &configuration_file)
1413 {
1414 int fixpoint = 0;
1415 if (ann != NULL)
1416 {
1417 fixpoint = fann_save_to_fixed(ann, configuration_file.c_str());
1418 }
1419 return fixpoint;
1420 }
1421
1422#ifndef FIXEDFANN
1423 /* Method: train
1424
1425 Train one iteration with a set of inputs, and a set of desired outputs.
1426 This training is always incremental training (see <FANN::training_algorithm_enum>),
1427 since only one pattern is presented.
1428
1429 Parameters:
1430 ann - The neural network structure
1431 input - an array of inputs. This array must be exactly <fann_get_num_input> long.
1432 desired_output - an array of desired outputs. This array must be exactly <fann_get_num_output> long.
1433
1434 See also:
1435 <train_on_data>, <train_epoch>, <fann_train>
1436
1437 This function appears in FANN >= 1.0.0.
1438 */
1439 void train(fann_type *input, fann_type *desired_output)
1440 {
1441 if (ann != NULL)
1442 {
1443 fann_train(ann, input, desired_output);
1444 }
1445 }
1446
1447 /* Method: train_epoch
1448 Train one epoch with a set of training data.
1449
1450 Train one epoch with the training data stored in data. One epoch is where all of
1451 the training data is considered exactly once.
1452
1453 This function returns the MSE error as it is calculated either before or during
1454 the actual training. This is not the actual MSE after the training epoch, but since
1455 calculating this will require to go through the entire training set once more, it is
1456 more than adequate to use this value during training.
1457
1458 The training algorithm used by this function is chosen by the <fann_set_training_algorithm>
1459 function.
1460
1461 See also:
1462 <train_on_data>, <test_data>, <fann_train_epoch>
1463
1464 This function appears in FANN >= 1.2.0.
1465 */
1466 float train_epoch(const training_data &data)
1467 {
1468 float mse = 0.0f;
1469 if ((ann != NULL) && (data.train_data != NULL))
1470 {
1471 mse = fann_train_epoch(ann, data.train_data);
1472 }
1473 return mse;
1474 }
1475
1476 /* Method: train_on_data
1477
1478 Trains on an entire dataset, for a period of time.
1479
1480 This training uses the training algorithm chosen by <set_training_algorithm>,
1481 and the parameters set for these training algorithms.
1482
1483 Parameters:
1484 ann - The neural network
1485 data - The data, which should be used during training
1486 max_epochs - The maximum number of epochs the training should continue
1487 epochs_between_reports - The number of epochs between printing a status report to stdout.
1488 A value of zero means no reports should be printed.
1489 desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
1490 is chosen by <set_train_stop_function>.
1491
1492 Instead of printing out reports every epochs_between_reports, a callback function can be called
1493 (see <set_callback>).
1494
1495 See also:
1496 <train_on_file>, <train_epoch>, <fann_train_on_data>
1497
1498 This function appears in FANN >= 1.0.0.
1499 */
1500 void train_on_data(const training_data &data, unsigned int max_epochs,
1501 unsigned int epochs_between_reports, float desired_error)
1502 {
1503 if ((ann != NULL) && (data.train_data != NULL))
1504 {
1505 fann_train_on_data(ann, data.train_data, max_epochs,
1506 epochs_between_reports, desired_error);
1507 }
1508 }
1509
1510
1511
1512 void train_on_data(const std::vector< std::vector<fann_type> >& input,
1513 const std::vector< std::vector<fann_type> >& output,
1514 bool initWeights,
1515 unsigned int max_epochs,
1516 unsigned int epochs_between_reports,
1517 float desired_error)
1518 {
1519 if ((ann != NULL))
1520 {
1521 training_data data;
1522 data.set_train_data(input,output);
1523 if(data.train_data != NULL){
1524 if(initWeights)
1525 init_weights(data);
1526 fann_train_on_data(ann, data.train_data, max_epochs,
1527 epochs_between_reports, desired_error);
1528 }
1529 }
1530 }
1531
1532 void train_on_data(const std::vector< Vector2d<fann_type> >& input,
1533 unsigned int num_data,
1534 bool initWeights,
1535 unsigned int max_epochs,
1536 unsigned int epochs_between_reports,
1537 float desired_error)
1538 {
1539 if ((ann != NULL))
1540 {
1541 training_data data;
1542 data.set_train_data(input,num_data);
1543 if(data.train_data != NULL){
1544 if(initWeights)
1545 init_weights(data);
1546 fann_train_on_data(ann, data.train_data, max_epochs,
1547 epochs_between_reports, desired_error);
1548 }
1549 }
1550 }
1551
        /* Method: cross_validation (classification)

            n-fold (cv > 1) or leave-one-out (cv <= 1) cross validation for
            classification. Repeatedly moves a test subset out of
            trainingFeatures (one Vector2d per class), retrains the network on
            the remainder, classifies the held-out samples, and appends the
            true class to referenceVector and the predicted class to
            outputVector for each test sample.

            NOTE(review): trainingFeatures is mutated during the run; samples
            are restored afterwards, but their order within each class changes.
            NOTE(review): testFeatures is Vector2d<float>, so this path assumes
            fann_type == float (floatfann) — confirm before using doublefann.
            Always returns 0 (the rmse accumulation is disabled).
        */
        float cross_validation(std::vector< Vector2d<fann_type> >& trainingFeatures,
                               unsigned int ntraining,
                               unsigned short cv,
                               unsigned int max_epochs,
                               float desired_error,
                               std::vector<unsigned short>& referenceVector,
                               std::vector<unsigned short>& outputVector,
                               short verbose=0)
        {
          referenceVector.clear();
          outputVector.clear();
          assert(cv<ntraining);
          float rmse=0;
          int nclass=trainingFeatures.size();
          std::vector< Vector2d<float> > testFeatures(nclass);
          int testclass=0;//class to leave out
          int testsample=0;//sample to leave out
          int nrun=(cv>1)? cv : ntraining;
          if(nrun>ntraining)
            nrun=ntraining;
          for(int irun=0;irun<nrun;++irun){
            if(verbose>1)
              std::cout << "run " << irun << std::endl;
            //reset training sample from last run: move held-out samples back
            if(verbose>1)
              std::cout << "reset training sample from last run" << std::endl;
            for(int iclass=0;iclass<nclass;++iclass){
              while(testFeatures[iclass].size()){
                trainingFeatures[iclass].push_back(testFeatures[iclass].back());
                testFeatures[iclass].pop_back();
              }
              if(verbose>1){
                std::cout << "training size " << iclass << ": " << trainingFeatures[iclass].size() << std::endl;
                std::cout << "test size " << iclass << ": " << testFeatures[iclass].size() << std::endl;
              }
              assert(trainingFeatures[iclass].size());
            }
            //create test sample: take samples from the front of each class,
            //cycling classes so the train/test ratio stays near (cv-1):1
            if(verbose>1)
              std::cout << "create test sample" << std::endl;
            unsigned int nsample=0;
            int ntest=(cv>1)? ntraining/cv : 1; //n-fold cross validation or leave-one-out
            while(nsample<ntest){
              testFeatures[testclass].push_back(trainingFeatures[testclass][0]);
              trainingFeatures[testclass].erase(trainingFeatures[testclass].begin());
              if(!trainingFeatures[testclass].size())
                std::cout << "Error: testclass " << testclass << " has no training" << std::endl;
              assert(trainingFeatures[testclass].size());
              ++nsample;
              if(static_cast<float>(trainingFeatures[testclass].size())/static_cast<float>(testFeatures[testclass].size())<=(cv-1)){
                if(verbose>1){
                  std::cout << "training size " << testclass << ": " << trainingFeatures[testclass].size() << std::endl;
                  std::cout << "test size " << testclass << ": " << testFeatures[testclass].size() << std::endl;
                }
                testclass=(testclass+1)%nclass;
              }
            }
            assert(nsample==ntest);
            //training with left out training set
            if(verbose>1)
              std::cout << std::endl << "Set training data" << std::endl;
            bool initWeights=true;
            unsigned int epochs_between_reports=0;
            train_on_data(trainingFeatures,ntraining-ntest,initWeights, max_epochs,
                          epochs_between_reports, desired_error);
            //cross validation with testFeatures
            if(verbose>1)
              std::cout << std::endl << "Cross validation" << std::endl;

            std::vector<float> result(nclass);
            int maxClass=-1;
            for(int iclass=0;iclass<testFeatures.size();++iclass){
              assert(trainingFeatures[iclass].size());
              for(int isample=0;isample<testFeatures[iclass].size();++isample){
                result=run(testFeatures[iclass][isample]);
                //search class with maximum posterior probability
                float maxP=-1;
                for(int ic=0;ic<nclass;++ic){
                  float pv=(result[ic]+1.0)/2.0;//bring back to scale [0,1]
                  if(pv>maxP){
                    maxP=pv;
                    maxClass=ic;
                  }
                }
                assert(maxP>=0);
                referenceVector.push_back(iclass);
                outputVector.push_back(maxClass);
              }
            }
          }
          //reset from very last run so the caller gets all samples back
          for(int iclass=0;iclass<nclass;++iclass){
            while(testFeatures[iclass].size()){
              trainingFeatures[iclass].push_back(testFeatures[iclass].back());
              testFeatures[iclass].pop_back();
            }
          }
          return 0;
        }
1661
        /* Method: cross_validation (regression)

            n-fold (cv > 1) or leave-one-out (cv <= 1) cross validation for
            regression. Repeatedly moves a test subset out of the parallel
            input/output sample vectors, retrains on the remainder, runs the
            held-out inputs through the network, and appends the true output
            to referenceVector and the prediction to outputVector.

            NOTE(review): input and output are mutated during the run; samples
            are restored afterwards, but their order changes. Always returns 0
            (no aggregate error is computed here; compare referenceVector and
            outputVector in the caller).
        */
        float cross_validation(std::vector< std::vector<fann_type> >& input,
                               std::vector< std::vector<fann_type> >& output,
                               unsigned short cv,
                               unsigned int max_epochs,
                               float desired_error,
                               std::vector< std::vector<fann_type> >& referenceVector,
                               std::vector< std::vector<fann_type> >& outputVector,
                               short verbose=0)
        {
          assert(input.size());
          assert(output.size()==input.size());
          unsigned int ntraining=input.size();
          unsigned int noutput=output[0].size();
          referenceVector.clear();
          outputVector.clear();
          assert(cv<ntraining);
          float rmse=0;
          std::vector< std::vector<fann_type> > testInput;
          std::vector< std::vector<fann_type> > testOutput;
          int testsample=0;//sample to leave out
          int nrun=(cv>1)? cv : ntraining;
          if(nrun>ntraining)
            nrun=ntraining;
          for(int irun=0;irun<nrun;++irun){
            if(verbose>1)
              std::cout << "run " << irun << std::endl;
            //reset training sample from last run: move held-out samples back
            if(verbose>1)
              std::cout << "reset training sample from last run" << std::endl;
            while(testInput.size()){
              input.push_back(testInput.back());
              testInput.pop_back();
            }
            while(testOutput.size()){
              output.push_back(testOutput.back());
              testOutput.pop_back();
            }
            assert(testInput.size()==testOutput.size());
            if(verbose>1){
              std::cout << "training size: " << input.size() << std::endl;
              std::cout << "test size: " << testInput.size() << std::endl;
            }
            assert(input.size());
            //create test sample: take ntest samples from the front
            if(verbose>1)
              std::cout << "create test sample" << std::endl;
            unsigned int nsample=0;
            int ntest=(cv>1)? ntraining/cv : 1; //n-fold cross validation or leave-one-out
            while(nsample<ntest){
              testInput.push_back(input[0]);
              testOutput.push_back(output[0]);
              input.erase(input.begin());
              output.erase(output.begin());
              assert(input.size());
              assert(output.size());
              assert(input.size()==output.size());
              ++nsample;
            }
            assert(nsample==ntest);
            assert(testInput.size()==testOutput.size());
            //training with left out training set
            if(verbose>1)
              std::cout << std::endl << "Set training data" << std::endl;
            bool initWeights=true;
            unsigned int epochs_between_reports=0;

            train_on_data(input,output,initWeights, max_epochs,
                          epochs_between_reports, desired_error);
            //cross validation with testFeatures
            if(verbose>1)
              std::cout << std::endl << "Cross validation" << std::endl;

            std::vector<fann_type> result(noutput);
            for(int isample=0;isample<testInput.size();++isample){
              result=run(testInput[isample]);
              referenceVector.push_back(testOutput[isample]);
              outputVector.push_back(result);
            }
          }
          //reset from very last run so the caller gets all samples back
          while(testInput.size()){
            input.push_back(testInput.back());
            testInput.pop_back();
          }
          while(testOutput.size()){
            output.push_back(testOutput.back());
            testOutput.pop_back();
          }
          return 0;
        }
1753
1754 /* Method: train_on_file
1755
1756 Does the same as <train_on_data>, but reads the training data directly from a file.
1757
1758 See also:
1759 <train_on_data>, <fann_train_on_file>
1760
1761 This function appears in FANN >= 1.0.0.
1762 */
1763 void train_on_file(const std::string &filename, unsigned int max_epochs,
1764 unsigned int epochs_between_reports, float desired_error)
1765 {
1766 if (ann != NULL)
1767 {
1768 fann_train_on_file(ann, filename.c_str(),
1769 max_epochs, epochs_between_reports, desired_error);
1770 }
1771 }
1772#endif /* NOT FIXEDFANN */
1773
1774 /* Method: test
1775
1776 Test with a set of inputs, and a set of desired outputs.
1777 This operation updates the mean square error, but does not
1778 change the network in any way.
1779
1780 See also:
1781 <test_data>, <train>, <fann_test>
1782
1783 This function appears in FANN >= 1.0.0.
1784 */
1785 fann_type * test(fann_type *input, fann_type *desired_output)
1786 {
1787 fann_type * output = NULL;
1788 if (ann != NULL)
1789 {
1790 output = fann_test(ann, input, desired_output);
1791 }
1792 return output;
1793 }
1794
1795 /* Method: test_data
1796
1797 Test a set of training data and calculates the MSE for the training data.
1798
1799 This function updates the MSE and the bit fail values.
1800
1801 See also:
1802 <test>, <get_MSE>, <get_bit_fail>, <fann_test_data>
1803
1804 This function appears in FANN >= 1.2.0.
1805 */
1806 float test_data(const training_data &data)
1807 {
1808 float mse = 0.0f;
1809 if ((ann != NULL) && (data.train_data != NULL))
1810 {
1811 mse = fann_test_data(ann, data.train_data);
1812 }
1813 return mse;
1814 }
1815
        /* Method: test_data (Vector2d overload)

            Builds a fann_train_data set from per-class feature tables (one
            Vector2d per class, num_data samples in total, one output neuron
            per class with target +1 for the own class and -1 otherwise) and
            returns the MSE from <test_data>.

            NOTE(review): num_data must equal the total number of samples over
            all classes — if it is larger, some data->input rows stay NULL and
            fann would dereference them; confirm at the call sites.
            NOTE(review): the buffers are laid out like FANN's own allocation
            (one contiguous block per input/output, row pointers into it), so
            fann_destroy_train in trainingData's destructor presumably frees
            them correctly — TODO confirm against the FANN version in use.
        */
        float test_data(const std::vector< Vector2d<fann_type> >& input, unsigned int num_data)
        {
          assert(num_data);
          assert(input.size());
          unsigned int num_class=input.size();
          assert(input[0].size());
          unsigned int num_input=input[0][0].size();
          unsigned int num_output=num_class;
          // Hand-rolled allocation mirroring fann's internal train_data layout.
          struct fann_train_data *data =
            (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
          data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
          data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));

          data->num_data = num_data;
          data->num_input = num_input;
          data->num_output = num_output;

          // One contiguous block each for all inputs and all outputs.
          fann_type *data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
          fann_type *data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));

          unsigned int isample=0;
          for(int iclass=0;iclass<num_class;++iclass){
            for(int csample=0;csample<input[iclass].size();++csample){
              data->input[isample] = data_input;
              data_input += num_input;
              for(int iband=0;iband<input[iclass][csample].size();++iband){
                assert(input[iclass][csample].size()==num_input);
                data->input[isample][iband] = input[iclass][csample][iband];
              }
              data->output[isample] = data_output;
              data_output += num_output;
              // One-vs-all target coding: +1 for the own class, -1 elsewhere.
              for(int ic=0;ic<num_output;++ic){
                if(ic==iclass)
                  data->output[isample][ic] = 1;
                else
                  data->output[isample][ic] = -1;
              }
              ++isample;
            }
          }
          // trainingData takes ownership; its destructor releases the data.
          FANN::training_data trainingData;
          trainingData.train_data = data;
          return test_data(trainingData);
        }
1862
1863
1864 /* Method: get_MSE
1865 Reads the mean square error from the network.
1866
1867 Reads the mean square error from the network. This value is calculated during
1868 training or testing, and can therefore sometimes be a bit off if the weights
1869 have been changed since the last calculation of the value.
1870
1871 See also:
1872 <test_data>, <fann_get_MSE>
1873
1874 This function appears in FANN >= 1.1.0.
1875 */
1876 float get_MSE()
1877 {
1878 float mse = 0.0f;
1879 if (ann != NULL)
1880 {
1881 mse = fann_get_MSE(ann);
1882 }
1883 return mse;
1884 }
1885
1886 /* Method: reset_MSE
1887
1888 Resets the mean square error from the network.
1889
1890 This function also resets the number of bits that fail.
1891
1892 See also:
1893 <get_MSE>, <get_bit_fail_limit>, <fann_reset_MSE>
1894
1895 This function appears in FANN >= 1.1.0
1896 */
1897 void reset_MSE()
1898 {
1899 if (ann != NULL)
1900 {
1901 fann_reset_MSE(ann);
1902 }
1903 }
1904
1905 /* Method: set_callback
1906
1907 Sets the callback function for use during training. The user_data is passed to
1908 the callback. It can point to arbitrary data that the callback might require and
1909 can be NULL if it is not used.
1910
1911 See <FANN::callback_type> for more information about the callback function.
1912
1913 The default callback function simply prints out some status information.
1914
1915 This function appears in FANN >= 2.0.0.
1916 */
1917 void set_callback(callback_type callback, void *user_data)
1918 {
1919 if (ann != NULL)
1920 {
1921 // Allocated data is also deleted in the destroy method called by the destructor
1922 user_context *user_instance = static_cast<user_context *>(fann_get_user_data(ann));
1923 if (user_instance != NULL)
1924 delete user_instance;
1925
1926 user_instance = new user_context();
1927 user_instance->user_callback = callback;
1928 user_instance->user_data = user_data;
1929 user_instance->net = this;
1930 fann_set_user_data(ann, user_instance);
1931
1932 if (callback != NULL)
1933 fann_set_callback(ann, &FANN::neural_net::internal_callback);
1934 else
1935 fann_set_callback(ann, NULL);
1936 }
1937 }
1938
1939 /* Method: print_parameters
1940
1941 Prints all of the parameters and options of the neural network
1942
1943 See also:
1944 <fann_print_parameters>
1945
1946 This function appears in FANN >= 1.2.0.
1947 */
1948 void print_parameters()
1949 {
1950 if (ann != NULL)
1951 {
1952 fann_print_parameters(ann);
1953 }
1954 }
1955
1956 /* Method: get_training_algorithm
1957
1958 Return the training algorithm as described by <FANN::training_algorithm_enum>.
1959 This training algorithm is used by <train_on_data> and associated functions.
1960
        Note that this algorithm is also used during <cascadetrain_on_data>, although only
        FANN::TRAIN_RPROP and FANN::TRAIN_QUICKPROP are allowed during cascade training.
1963
1964 The default training algorithm is FANN::TRAIN_RPROP.
1965
1966 See also:
1967 <set_training_algorithm>, <FANN::training_algorithm_enum>,
1968 <fann_get_training_algorithm>
1969
1970 This function appears in FANN >= 1.0.0.
1971 */
1972 training_algorithm_enum get_training_algorithm()
1973 {
1974 fann_train_enum training_algorithm = FANN_TRAIN_INCREMENTAL;
1975 if (ann != NULL)
1976 {
1977 training_algorithm = fann_get_training_algorithm(ann);
1978 }
1979 return static_cast<training_algorithm_enum>(training_algorithm);
1980 }
1981
1982 /* Method: set_training_algorithm
1983
1984 Set the training algorithm.
1985
1986 More info available in <get_training_algorithm>
1987
1988 This function appears in FANN >= 1.0.0.
1989 */
1990 void set_training_algorithm(training_algorithm_enum training_algorithm)
1991 {
1992 if (ann != NULL)
1993 {
1994 fann_set_training_algorithm(ann,
1995 static_cast<fann_train_enum>(training_algorithm));
1996 }
1997 }
1998
1999 /* Method: get_learning_rate
2000
2001 Return the learning rate.
2002
2003 The learning rate is used to determine how aggressive training should be for some of the
2004 training algorithms (FANN::TRAIN_INCREMENTAL, FANN::TRAIN_BATCH, FANN::TRAIN_QUICKPROP).
2005 Do however note that it is not used in FANN::TRAIN_RPROP.
2006
2007 The default learning rate is 0.7.
2008
2009 See also:
2010 <set_learning_rate>, <set_training_algorithm>,
2011 <fann_get_learning_rate>
2012
2013 This function appears in FANN >= 1.0.0.
2014 */
2015 float get_learning_rate()
2016 {
2017 float learning_rate = 0.0f;
2018 if (ann != NULL)
2019 {
2020 learning_rate = fann_get_learning_rate(ann);
2021 }
2022 return learning_rate;
2023 }
2024
2025 /* Method: set_learning_rate
2026
2027 Set the learning rate.
2028
2029 More info available in <get_learning_rate>
2030
2031 This function appears in FANN >= 1.0.0.
2032 */
2033 void set_learning_rate(float learning_rate)
2034 {
2035 if (ann != NULL)
2036 {
2037 fann_set_learning_rate(ann, learning_rate);
2038 }
2039 }
2040
2041 /*************************************************************************************************************/
2042
2043 /* Method: get_activation_function
2044
2045 Get the activation function for neuron number *neuron* in layer number *layer*,
2046 counting the input layer as layer 0.
2047
2048 It is not possible to get activation functions for the neurons in the input layer.
2049
2050 Information about the individual activation functions is available at <FANN::activation_function_enum>.
2051
2052 Returns:
2053 The activation function for the neuron or -1 if the neuron is not defined in the neural network.
2054
2055 See also:
2056 <set_activation_function_layer>, <set_activation_function_hidden>,
2057 <set_activation_function_output>, <set_activation_steepness>,
2058 <set_activation_function>, <fann_get_activation_function>
2059
2060 This function appears in FANN >= 2.1.0
2061 */
2062 activation_function_enum get_activation_function(int layer, int neuron)
2063 {
2064 unsigned int activation_function = 0;
2065 if (ann != NULL)
2066 {
2067 activation_function = fann_get_activation_function(ann, layer, neuron);
2068 }
2069 return static_cast<activation_function_enum>(activation_function);
2070 }
2071
2072 /* Method: set_activation_function
2073
2074 Set the activation function for neuron number *neuron* in layer number *layer*,
2075 counting the input layer as layer 0.
2076
2077 It is not possible to set activation functions for the neurons in the input layer.
2078
2079 When choosing an activation function it is important to note that the activation
2080 functions have different range. FANN::SIGMOID is e.g. in the 0 - 1 range while
2081 FANN::SIGMOID_SYMMETRIC is in the -1 - 1 range and FANN::LINEAR is unbound.
2082
2083 Information about the individual activation functions is available at <FANN::activation_function_enum>.
2084
2085 The default activation function is FANN::SIGMOID_STEPWISE.
2086
2087 See also:
2088 <set_activation_function_layer>, <set_activation_function_hidden>,
2089 <set_activation_function_output>, <set_activation_steepness>,
2090 <get_activation_function>, <fann_set_activation_function>
2091
2092 This function appears in FANN >= 2.0.0.
2093 */
2094 void set_activation_function(activation_function_enum activation_function, int layer, int neuron)
2095 {
2096 if (ann != NULL)
2097 {
2098 fann_set_activation_function(ann,
2099 static_cast<fann_activationfunc_enum>(activation_function), layer, neuron);
2100 }
2101 }
2102
2103 /* Method: set_activation_function_layer
2104
2105 Set the activation function for all the neurons in the layer number *layer*,
2106 counting the input layer as layer 0.
2107
2108 It is not possible to set activation functions for the neurons in the input layer.
2109
2110 See also:
2111 <set_activation_function>, <set_activation_function_hidden>,
2112 <set_activation_function_output>, <set_activation_steepness_layer>,
2113 <fann_set_activation_function_layer>
2114
2115 This function appears in FANN >= 2.0.0.
2116 */
2117 void set_activation_function_layer(activation_function_enum activation_function, int layer)
2118 {
2119 if (ann != NULL)
2120 {
2121 fann_set_activation_function_layer(ann,
2122 static_cast<fann_activationfunc_enum>(activation_function), layer);
2123 }
2124 }
2125
2126 /* Method: set_activation_function_hidden
2127
2128 Set the activation function for all of the hidden layers.
2129
2130 See also:
2131 <set_activation_function>, <set_activation_function_layer>,
2132 <set_activation_function_output>, <set_activation_steepness_hidden>,
2133 <fann_set_activation_function_hidden>
2134
2135 This function appears in FANN >= 1.0.0.
2136 */
2137 void set_activation_function_hidden(activation_function_enum activation_function)
2138 {
2139 if (ann != NULL)
2140 {
2141 fann_set_activation_function_hidden(ann,
2142 static_cast<fann_activationfunc_enum>(activation_function));
2143 }
2144 }
2145
2146 /* Method: set_activation_function_output
2147
2148 Set the activation function for the output layer.
2149
2150 See also:
2151 <set_activation_function>, <set_activation_function_layer>,
2152 <set_activation_function_hidden>, <set_activation_steepness_output>,
2153 <fann_set_activation_function_output>
2154
2155 This function appears in FANN >= 1.0.0.
2156 */
2157 void set_activation_function_output(activation_function_enum activation_function)
2158 {
2159 if (ann != NULL)
2160 {
2161 fann_set_activation_function_output(ann,
2162 static_cast<fann_activationfunc_enum>(activation_function));
2163 }
2164 }
2165
2166 /* Method: get_activation_steepness
2167
2168 Get the activation steepness for neuron number *neuron* in layer number *layer*,
2169 counting the input layer as layer 0.
2170
2171 It is not possible to get activation steepness for the neurons in the input layer.
2172
        The steepness of an activation function says something about how fast the activation function
        goes from the minimum to the maximum. A high value for the activation function will also
        give more aggressive training.
2176
2177 When training neural networks where the output values should be at the extremes (usually 0 and 1,
2178 depending on the activation function), a steep activation function can be used (e.g. 1.0).
2179
2180 The default activation steepness is 0.5.
2181
2182 Returns:
2183 The activation steepness for the neuron or -1 if the neuron is not defined in the neural network.
2184
2185 See also:
2186 <set_activation_steepness_layer>, <set_activation_steepness_hidden>,
2187 <set_activation_steepness_output>, <set_activation_function>,
2188 <set_activation_steepness>, <fann_get_activation_steepness>
2189
2190 This function appears in FANN >= 2.1.0
2191 */
2192 fann_type get_activation_steepness(int layer, int neuron)
2193 {
2194 fann_type activation_steepness = 0;
2195 if (ann != NULL)
2196 {
2197 activation_steepness = fann_get_activation_steepness(ann, layer, neuron);
2198 }
2199 return activation_steepness;
2200 }
2201
2202 /* Method: set_activation_steepness
2203
2204 Set the activation steepness for neuron number *neuron* in layer number *layer*,
2205 counting the input layer as layer 0.
2206
2207 It is not possible to set activation steepness for the neurons in the input layer.
2208
        The steepness of an activation function says something about how fast the activation function
        goes from the minimum to the maximum. A high value for the activation function will also
        give more aggressive training.
2212
2213 When training neural networks where the output values should be at the extremes (usually 0 and 1,
2214 depending on the activation function), a steep activation function can be used (e.g. 1.0).
2215
2216 The default activation steepness is 0.5.
2217
2218 See also:
2219 <set_activation_steepness_layer>, <set_activation_steepness_hidden>,
2220 <set_activation_steepness_output>, <set_activation_function>,
2221 <get_activation_steepness>, <fann_set_activation_steepness>
2222
2223 This function appears in FANN >= 2.0.0.
2224 */
2225 void set_activation_steepness(fann_type steepness, int layer, int neuron)
2226 {
2227 if (ann != NULL)
2228 {
2229 fann_set_activation_steepness(ann, steepness, layer, neuron);
2230 }
2231 }
2232
2233 /* Method: set_activation_steepness_layer
2234
2235 Set the activation steepness all of the neurons in layer number *layer*,
2236 counting the input layer as layer 0.
2237
2238 It is not possible to set activation steepness for the neurons in the input layer.
2239
2240 See also:
2241 <set_activation_steepness>, <set_activation_steepness_hidden>,
2242 <set_activation_steepness_output>, <set_activation_function_layer>,
2243 <fann_set_activation_steepness_layer>
2244
2245 This function appears in FANN >= 2.0.0.
2246 */
2247 void set_activation_steepness_layer(fann_type steepness, int layer)
2248 {
2249 if (ann != NULL)
2250 {
2251 fann_set_activation_steepness_layer(ann, steepness, layer);
2252 }
2253 }
2254
2255 /* Method: set_activation_steepness_hidden
2256
2257 Set the steepness of the activation steepness in all of the hidden layers.
2258
2259 See also:
2260 <set_activation_steepness>, <set_activation_steepness_layer>,
2261 <set_activation_steepness_output>, <set_activation_function_hidden>,
2262 <fann_set_activation_steepness_hidden>
2263
2264 This function appears in FANN >= 1.2.0.
2265 */
2266 void set_activation_steepness_hidden(fann_type steepness)
2267 {
2268 if (ann != NULL)
2269 {
2270 fann_set_activation_steepness_hidden(ann, steepness);
2271 }
2272 }
2273
2274 /* Method: set_activation_steepness_output
2275
2276 Set the steepness of the activation steepness in the output layer.
2277
2278 See also:
2279 <set_activation_steepness>, <set_activation_steepness_layer>,
2280 <set_activation_steepness_hidden>, <set_activation_function_output>,
2281 <fann_set_activation_steepness_output>
2282
2283 This function appears in FANN >= 1.2.0.
2284 */
2285 void set_activation_steepness_output(fann_type steepness)
2286 {
2287 if (ann != NULL)
2288 {
2289 fann_set_activation_steepness_output(ann, steepness);
2290 }
2291 }
2292
2293 /*************************************************************************************************************/
2294
2295 /* Method: get_train_error_function
2296
2297 Returns the error function used during training.
2298
2299 The error functions is described further in <FANN::error_function_enum>
2300
2301 The default error function is FANN::ERRORFUNC_TANH
2302
2303 See also:
2304 <set_train_error_function>, <fann_get_train_error_function>
2305
2306 This function appears in FANN >= 1.2.0.
2307 */
2308 error_function_enum get_train_error_function()
2309 {
2310 fann_errorfunc_enum train_error_function = FANN_ERRORFUNC_LINEAR;
2311 if (ann != NULL)
2312 {
2313 train_error_function = fann_get_train_error_function(ann);
2314 }
2315 return static_cast<error_function_enum>(train_error_function);
2316 }
2317
2318 /* Method: set_train_error_function
2319
2320 Set the error function used during training.
2321
2322 The error functions is described further in <FANN::error_function_enum>
2323
2324 See also:
2325 <get_train_error_function>, <fann_set_train_error_function>
2326
2327 This function appears in FANN >= 1.2.0.
2328 */
2329 void set_train_error_function(error_function_enum train_error_function)
2330 {
2331 if (ann != NULL)
2332 {
2333 fann_set_train_error_function(ann,
2334 static_cast<fann_errorfunc_enum>(train_error_function));
2335 }
2336 }
2337
2338 /* Method: get_quickprop_decay
2339
2340 The decay is a small negative valued number which is the factor that the weights
2341 should become smaller in each iteration during quickprop training. This is used
2342 to make sure that the weights do not become too high during training.
2343
2344 The default decay is -0.0001.
2345
2346 See also:
2347 <set_quickprop_decay>, <fann_get_quickprop_decay>
2348
2349 This function appears in FANN >= 1.2.0.
2350 */
2351 float get_quickprop_decay()
2352 {
2353 float quickprop_decay = 0.0f;
2354 if (ann != NULL)
2355 {
2356 quickprop_decay = fann_get_quickprop_decay(ann);
2357 }
2358 return quickprop_decay;
2359 }
2360
2361 /* Method: set_quickprop_decay
2362
2363 Sets the quickprop decay factor.
2364
2365 See also:
2366 <get_quickprop_decay>, <fann_set_quickprop_decay>
2367
2368 This function appears in FANN >= 1.2.0.
2369 */
2370 void set_quickprop_decay(float quickprop_decay)
2371 {
2372 if (ann != NULL)
2373 {
2374 fann_set_quickprop_decay(ann, quickprop_decay);
2375 }
2376 }
2377
2378 /* Method: get_quickprop_mu
2379
        The mu factor is used to increase and decrease the step-size during quickprop training.
        The mu factor should always be above 1, since it would otherwise decrease the step-size
        when it was supposed to increase it.
2383
2384 The default mu factor is 1.75.
2385
2386 See also:
2387 <set_quickprop_mu>, <fann_get_quickprop_mu>
2388
2389 This function appears in FANN >= 1.2.0.
2390 */
2391 float get_quickprop_mu()
2392 {
2393 float quickprop_mu = 0.0f;
2394 if (ann != NULL)
2395 {
2396 quickprop_mu = fann_get_quickprop_mu(ann);
2397 }
2398 return quickprop_mu;
2399 }
2400
2401 /* Method: set_quickprop_mu
2402
2403 Sets the quickprop mu factor.
2404
2405 See also:
2406 <get_quickprop_mu>, <fann_set_quickprop_mu>
2407
2408 This function appears in FANN >= 1.2.0.
2409 */
2410 void set_quickprop_mu(float quickprop_mu)
2411 {
2412 if (ann != NULL)
2413 {
2414 fann_set_quickprop_mu(ann, quickprop_mu);
2415 }
2416 }
2417
2418 /* Method: get_rprop_increase_factor
2419
2420 The increase factor is a value larger than 1, which is used to
2421 increase the step-size during RPROP training.
2422
2423 The default increase factor is 1.2.
2424
2425 See also:
2426 <set_rprop_increase_factor>, <fann_get_rprop_increase_factor>
2427
2428 This function appears in FANN >= 1.2.0.
2429 */
2430 float get_rprop_increase_factor()
2431 {
2432 float factor = 0.0f;
2433 if (ann != NULL)
2434 {
2435 factor = fann_get_rprop_increase_factor(ann);
2436 }
2437 return factor;
2438 }
2439
2440 /* Method: set_rprop_increase_factor
2441
2442 The increase factor used during RPROP training.
2443
2444 See also:
2445 <get_rprop_increase_factor>, <fann_set_rprop_increase_factor>
2446
2447 This function appears in FANN >= 1.2.0.
2448 */
2449 void set_rprop_increase_factor(float rprop_increase_factor)
2450 {
2451 if (ann != NULL)
2452 {
2453 fann_set_rprop_increase_factor(ann, rprop_increase_factor);
2454 }
2455 }
2456
2457 /* Method: get_rprop_decrease_factor
2458
2459 The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.
2460
2461 The default decrease factor is 0.5.
2462
2463 See also:
2464 <set_rprop_decrease_factor>, <fann_get_rprop_decrease_factor>
2465
2466 This function appears in FANN >= 1.2.0.
2467 */
2468 float get_rprop_decrease_factor()
2469 {
2470 float factor = 0.0f;
2471 if (ann != NULL)
2472 {
2473 factor = fann_get_rprop_decrease_factor(ann);
2474 }
2475 return factor;
2476 }
2477
2478 /* Method: set_rprop_decrease_factor
2479
2480 The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.
2481
2482 See also:
2483 <get_rprop_decrease_factor>, <fann_set_rprop_decrease_factor>
2484
2485 This function appears in FANN >= 1.2.0.
2486 */
2487 void set_rprop_decrease_factor(float rprop_decrease_factor)
2488 {
2489 if (ann != NULL)
2490 {
2491 fann_set_rprop_decrease_factor(ann, rprop_decrease_factor);
2492 }
2493 }
2494
2495 /* Method: get_rprop_delta_min
2496
2497 The minimum step-size is a small positive number determining how small the minimum step-size may be.
2498
2499 The default value delta min is 0.0.
2500
2501 See also:
2502 <set_rprop_delta_min>, <fann_get_rprop_delta_min>
2503
2504 This function appears in FANN >= 1.2.0.
2505 */
2506 float get_rprop_delta_min()
2507 {
2508 float delta = 0.0f;
2509 if (ann != NULL)
2510 {
2511 delta = fann_get_rprop_delta_min(ann);
2512 }
2513 return delta;
2514 }
2515
2516 /* Method: set_rprop_delta_min
2517
2518 The minimum step-size is a small positive number determining how small the minimum step-size may be.
2519
2520 See also:
2521 <get_rprop_delta_min>, <fann_set_rprop_delta_min>
2522
2523 This function appears in FANN >= 1.2.0.
2524 */
2525 void set_rprop_delta_min(float rprop_delta_min)
2526 {
2527 if (ann != NULL)
2528 {
2529 fann_set_rprop_delta_min(ann, rprop_delta_min);
2530 }
2531 }
2532
2533 /* Method: get_rprop_delta_max
2534
2535 The maximum step-size is a positive number determining how large the maximum step-size may be.
2536
2537 The default delta max is 50.0.
2538
2539 See also:
2540 <set_rprop_delta_max>, <get_rprop_delta_min>, <fann_get_rprop_delta_max>
2541
2542 This function appears in FANN >= 1.2.0.
2543 */
2544 float get_rprop_delta_max()
2545 {
2546 float delta = 0.0f;
2547 if (ann != NULL)
2548 {
2549 delta = fann_get_rprop_delta_max(ann);
2550 }
2551 return delta;
2552 }
2553
2554 /* Method: set_rprop_delta_max
2555
2556 The maximum step-size is a positive number determining how large the maximum step-size may be.
2557
2558 See also:
2559 <get_rprop_delta_max>, <get_rprop_delta_min>, <fann_set_rprop_delta_max>
2560
2561 This function appears in FANN >= 1.2.0.
2562 */
2563 void set_rprop_delta_max(float rprop_delta_max)
2564 {
2565 if (ann != NULL)
2566 {
2567 fann_set_rprop_delta_max(ann, rprop_delta_max);
2568 }
2569 }
2570
2571 /* Method: get_num_input
2572
2573 Get the number of input neurons.
2574
2575 This function appears in FANN >= 1.0.0.
2576 */
2577 unsigned int get_num_input()
2578 {
2579 unsigned int num_input = 0;
2580 if (ann != NULL)
2581 {
2582 num_input = fann_get_num_input(ann);
2583 }
2584 return num_input;
2585 }
2586
2587 /* Method: get_num_output
2588
2589 Get the number of output neurons.
2590
2591 This function appears in FANN >= 1.0.0.
2592 */
2593 unsigned int get_num_output()
2594 {
2595 unsigned int num_output = 0;
2596 if (ann != NULL)
2597 {
2598 num_output = fann_get_num_output(ann);
2599 }
2600 return num_output;
2601 }
2602
2603 /* Method: get_total_neurons
2604
2605 Get the total number of neurons in the entire network. This number does also include the
2606 bias neurons, so a 2-4-2 network has 2+4+2 +2(bias) = 10 neurons.
2607
2608 This function appears in FANN >= 1.0.0.
2609 */
2610 unsigned int get_total_neurons()
2611 {
2612 if (ann == NULL)
2613 {
2614 return 0;
2615 }
2616 return fann_get_total_neurons(ann);
2617 }
2618
2619 /* Method: get_total_connections
2620
2621 Get the total number of connections in the entire network.
2622
2623 This function appears in FANN >= 1.0.0.
2624 */
2625 unsigned int get_total_connections()
2626 {
2627 if (ann == NULL)
2628 {
2629 return 0;
2630 }
2631 return fann_get_total_connections(ann);
2632 }
2633
2634#ifdef FIXEDFANN
2635 /* Method: get_decimal_point
2636
2637 Returns the position of the decimal point in the ann.
2638
2639 This function is only available when the ANN is in fixed point mode.
2640
2641 The decimal point is described in greater detail in the tutorial <Fixed Point Usage>.
2642
2643 See also:
2644 <Fixed Point Usage>, <get_multiplier>, <save_to_fixed>,
2645 <training_data::save_train_to_fixed>, <fann_get_decimal_point>
2646
2647 This function appears in FANN >= 1.0.0.
2648 */
2649 unsigned int get_decimal_point()
2650 {
2651 if (ann == NULL)
2652 {
2653 return 0;
2654 }
2655 return fann_get_decimal_point(ann);
2656 }
2657
2658 /* Method: get_multiplier
2659
2660 Returns the multiplier that fix point data is multiplied with.
2661
2662 This function is only available when the ANN is in fixed point mode.
2663
        The multiplier is used to convert between floating point and fixed point notation.
        A floating point number is multiplied with the multiplier in order to get the fixed point
        number and vice versa.
2667
2668 The multiplier is described in greater detail in the tutorial <Fixed Point Usage>.
2669
2670 See also:
2671 <Fixed Point Usage>, <get_decimal_point>, <save_to_fixed>,
2672 <training_data::save_train_to_fixed>, <fann_get_multiplier>
2673
2674 This function appears in FANN >= 1.0.0.
2675 */
2676 unsigned int get_multiplier()
2677 {
2678 if (ann == NULL)
2679 {
2680 return 0;
2681 }
2682 return fann_get_multiplier(ann);
2683 }
2684#endif /* FIXEDFANN */
2685
2686 /*********************************************************************/
2687
2688 /* Method: get_network_type
2689
2690 Get the type of neural network it was created as.
2691
2692 Returns:
2693 The neural network type from enum <FANN::network_type_enum>
2694
2695 See Also:
2696 <fann_get_network_type>
2697
2698 This function appears in FANN >= 2.1.0
2699 */
2700 network_type_enum get_network_type()
2701 {
2702 fann_nettype_enum network_type = FANN_NETTYPE_LAYER;
2703 if (ann != NULL)
2704 {
2705 network_type = fann_get_network_type(ann);
2706 }
2707 return static_cast<network_type_enum>(network_type);
2708 }
2709
2710 /* Method: get_connection_rate
2711
2712 Get the connection rate used when the network was created
2713
2714 Returns:
2715 The connection rate
2716
2717 See also:
2718 <fann_get_connection_rate>
2719
2720 This function appears in FANN >= 2.1.0
2721 */
2722 float get_connection_rate()
2723 {
2724 if (ann == NULL)
2725 {
2726 return 0;
2727 }
2728 return fann_get_connection_rate(ann);
2729 }
2730
2731 /* Method: get_num_layers
2732
2733 Get the number of layers in the network
2734
2735 Returns:
2736 The number of layers in the neural network
2737
2738 See also:
2739 <fann_get_num_layers>
2740
2741 This function appears in FANN >= 2.1.0
2742 */
2743 unsigned int get_num_layers()
2744 {
2745 if (ann == NULL)
2746 {
2747 return 0;
2748 }
2749 return fann_get_num_layers(ann);
2750 }
2751
2752 /* Method: get_layer_array
2753
2754 Get the number of neurons in each layer in the network.
2755
2756 Bias is not included so the layers match the create methods.
2757
2758 The layers array must be preallocated to at least
2759 sizeof(unsigned int) * get_num_layers() long.
2760
2761 See also:
2762 <fann_get_layer_array>
2763
2764 This function appears in FANN >= 2.1.0
2765 */
2766 void get_layer_array(unsigned int *layers)
2767 {
2768 if (ann != NULL)
2769 {
2770 fann_get_layer_array(ann, layers);
2771 }
2772 }
2773
2774 void get_layer_array(std::vector<unsigned int> vlayers)
2775 {
2776 vlayers.clear();
2777 if (ann != NULL){
2778 vlayers.resize(get_num_layers());
2779 unsigned int* layers=(unsigned int *)calloc(get_num_layers(),sizeof(unsigned int));
2780 fann_get_layer_array(ann, layers);
2781 for(int ilayer=0;ilayer<get_num_layers();++ilayer)
2782 vlayers[ilayer]=*(layers++);
2783 }
2784 }
2785
2786 /* Method: get_bias_array
2787
2788 Get the number of bias in each layer in the network.
2789
2790 The bias array must be preallocated to at least
2791 sizeof(unsigned int) * get_num_layers() long.
2792
2793 See also:
2794 <fann_get_bias_array>
2795
2796 This function appears in FANN >= 2.1.0
2797 */
2798 void get_bias_array(unsigned int *bias)
2799 {
2800 if (ann != NULL)
2801 {
2802 fann_get_bias_array(ann, bias);
2803 }
2804 }
2805
2806 /* Method: get_connection_array
2807
2808 Get the connections in the network.
2809
2810 The connections array must be preallocated to at least
2811 sizeof(struct fann_connection) * get_total_connections() long.
2812
2813 See also:
2814 <fann_get_connection_array>
2815
2816 This function appears in FANN >= 2.1.0
2817 */
2818 void get_connection_array(connection *connections)
2819 {
2820 if (ann != NULL)
2821 {
2822 fann_get_connection_array(ann, connections);
2823 }
2824 }
2825
2826 void get_connection_array(std::vector<connection>& convector)
2827 {
2828 convector.clear();
2829 if (ann != NULL)
2830 {
2831 convector.resize(get_total_connections());
2832 connection* connections=(connection*)calloc(get_total_connections(),sizeof(connection));
2833 fann_get_connection_array(ann, connections);
2834 for(int icon=0;icon<get_total_connections();++icon)
2835 convector[icon]=*(connections++);
2836 }
2837 }
2838
2839 /* Method: set_weight_array
2840
2841 Set connections in the network.
2842
2843 Only the weights can be changed, connections and weights are ignored
2844 if they do not already exist in the network.
2845
2846 The array must have sizeof(struct fann_connection) * num_connections size.
2847
2848 See also:
2849 <fann_set_weight_array>
2850
2851 This function appears in FANN >= 2.1.0
2852 */
2853 void set_weight_array(connection *connections, unsigned int num_connections)
2854 {
2855 if (ann != NULL)
2856 {
2857 fann_set_weight_array(ann, connections, num_connections);
2858 }
2859 }
2860
2861 void set_weight_array(std::vector<connection> convector)
2862 {
2863 if (ann != NULL)
2864 {
2865 unsigned int num_connections=convector.size();
2866 connection* connections=(connection*)calloc(num_connections,sizeof(connection));
2867 connections=&(convector[0]);
2868 fann_set_weight_array(ann, connections, num_connections);
2869 }
2870 }
2871
2872 /* Method: set_weight
2873
2874 Set a connection in the network.
2875
2876 Only the weights can be changed. The connection/weight is
2877 ignored if it does not already exist in the network.
2878
2879 See also:
2880 <fann_set_weight>
2881
2882 This function appears in FANN >= 2.1.0
2883 */
2884 void set_weight(unsigned int from_neuron, unsigned int to_neuron, fann_type weight)
2885 {
2886 if (ann != NULL)
2887 {
2888 fann_set_weight(ann, from_neuron, to_neuron, weight);
2889 }
2890 }
2891
2892 /*********************************************************************/
2893
2894 /* Method: get_learning_momentum
2895
2896 Get the learning momentum.
2897
2898 The learning momentum can be used to speed up FANN::TRAIN_INCREMENTAL training.
2899 A too high momentum will however not benefit training. Setting momentum to 0 will
2900 be the same as not using the momentum parameter. The recommended value of this parameter
2901 is between 0.0 and 1.0.
2902
2903 The default momentum is 0.
2904
2905 See also:
2906 <set_learning_momentum>, <set_training_algorithm>
2907
2908 This function appears in FANN >= 2.0.0.
2909 */
2910 float get_learning_momentum()
2911 {
2912 float learning_momentum = 0.0f;
2913 if (ann != NULL)
2914 {
2915 learning_momentum = fann_get_learning_momentum(ann);
2916 }
2917 return learning_momentum;
2918 }
2919
2920 /* Method: set_learning_momentum
2921
2922 Set the learning momentum.
2923
2924 More info available in <get_learning_momentum>
2925
2926 This function appears in FANN >= 2.0.0.
2927 */
2928 void set_learning_momentum(float learning_momentum)
2929 {
2930 if (ann != NULL)
2931 {
2932 fann_set_learning_momentum(ann, learning_momentum);
2933 }
2934 }
2935
2936 /* Method: get_train_stop_function
2937
        Returns the stop function used during training.
2939
2940 The stop function is described further in <FANN::stop_function_enum>
2941
2942 The default stop function is FANN::STOPFUNC_MSE
2943
2944 See also:
2945 <get_train_stop_function>, <get_bit_fail_limit>
2946
2947 This function appears in FANN >= 2.0.0.
2948 */
2949 stop_function_enum get_train_stop_function()
2950 {
2951 enum fann_stopfunc_enum stopfunc = FANN_STOPFUNC_MSE;
2952 if (ann != NULL)
2953 {
2954 stopfunc = fann_get_train_stop_function(ann);
2955 }
2956 return static_cast<stop_function_enum>(stopfunc);
2957 }
2958
2959 /* Method: set_train_stop_function
2960
2961 Set the stop function used during training.
2962
2963 The stop function is described further in <FANN::stop_function_enum>
2964
2965 See also:
2966 <get_train_stop_function>
2967
2968 This function appears in FANN >= 2.0.0.
2969 */
2970 void set_train_stop_function(stop_function_enum train_stop_function)
2971 {
2972 if (ann != NULL)
2973 {
2974 fann_set_train_stop_function(ann,
2975 static_cast<enum fann_stopfunc_enum>(train_stop_function));
2976 }
2977 }
2978
2979 /* Method: get_bit_fail_limit
2980
2981 Returns the bit fail limit used during training.
2982
2983 The bit fail limit is used during training when the <FANN::stop_function_enum> is set to FANN_STOPFUNC_BIT.
2984
2985 The limit is the maximum accepted difference between the desired output and the actual output during
2986 training. Each output that diverges more than this limit is counted as an error bit.
2987 This difference is divided by two when dealing with symmetric activation functions,
2988 so that symmetric and not symmetric activation functions can use the same limit.
2989
2990 The default bit fail limit is 0.35.
2991
2992 See also:
2993 <set_bit_fail_limit>
2994
2995 This function appears in FANN >= 2.0.0.
2996 */
2997 fann_type get_bit_fail_limit()
2998 {
2999 fann_type bit_fail_limit = 0.0f;
3000
3001 if (ann != NULL)
3002 {
3003 bit_fail_limit = fann_get_bit_fail_limit(ann);
3004 }
3005 return bit_fail_limit;
3006 }
3007
3008 /* Method: set_bit_fail_limit
3009
3010 Set the bit fail limit used during training.
3011
3012 See also:
3013 <get_bit_fail_limit>
3014
3015 This function appears in FANN >= 2.0.0.
3016 */
3017 void set_bit_fail_limit(fann_type bit_fail_limit)
3018 {
3019 if (ann != NULL)
3020 {
3021 fann_set_bit_fail_limit(ann, bit_fail_limit);
3022 }
3023 }
3024
3025 /* Method: get_bit_fail
3026
3027 The number of fail bits; means the number of output neurons which differ more
3028 than the bit fail limit (see <get_bit_fail_limit>, <set_bit_fail_limit>).
3029 The bits are counted in all of the training data, so this number can be higher than
3030 the number of training data.
3031
3032 This value is reset by <reset_MSE> and updated by all the same functions which also
3033 updates the MSE value (e.g. <test_data>, <train_epoch>)
3034
3035 See also:
3036 <FANN::stop_function_enum>, <get_MSE>
3037
3038 This function appears in FANN >= 2.0.0
3039 */
3040 unsigned int get_bit_fail()
3041 {
3042 unsigned int bit_fail = 0;
3043 if (ann != NULL)
3044 {
3045 bit_fail = fann_get_bit_fail(ann);
3046 }
3047 return bit_fail;
3048 }
3049
3050 /*********************************************************************/
3051
3052 /* Method: cascadetrain_on_data
3053
3054 Trains on an entire dataset, for a period of time using the Cascade2 training algorithm.
3055 This algorithm adds neurons to the neural network while training, which means that it
3056 needs to start with an ANN without any hidden layers. The neural network should also use
3057 shortcut connections, so <create_shortcut> should be used to create the ANN like this:
3058 >net.create_shortcut(2, train_data.num_input_train_data(), train_data.num_output_train_data());
3059
        This training uses the parameters set using the set_cascade_... methods, but it also uses another
        training algorithm as its internal training algorithm. This algorithm can be set to either
        FANN::TRAIN_RPROP or FANN::TRAIN_QUICKPROP by <set_training_algorithm>, and the parameters
        set for these training algorithms will also affect the cascade training.
3064
3065 Parameters:
3066 data - The data, which should be used during training
             max_neurons - The maximum number of neurons to be added to neural network
3068 neurons_between_reports - The number of neurons between printing a status report to stdout.
3069 A value of zero means no reports should be printed.
3070 desired_error - The desired <fann_get_MSE> or <fann_get_bit_fail>, depending on which stop function
3071 is chosen by <fann_set_train_stop_function>.
3072
3073 Instead of printing out reports every neurons_between_reports, a callback function can be called
3074 (see <set_callback>).
3075
3076 See also:
3077 <train_on_data>, <cascadetrain_on_file>, <fann_cascadetrain_on_data>
3078
3079 This function appears in FANN >= 2.0.0.
3080 */
3081 void cascadetrain_on_data(const training_data &data, unsigned int max_neurons,
3082 unsigned int neurons_between_reports, float desired_error)
3083 {
3084 if ((ann != NULL) && (data.train_data != NULL))
3085 {
3086 fann_cascadetrain_on_data(ann, data.train_data, max_neurons,
3087 neurons_between_reports, desired_error);
3088 }
3089 }
3090
3091 /* Method: cascadetrain_on_file
3092
3093 Does the same as <cascadetrain_on_data>, but reads the training data directly from a file.
3094
3095 See also:
3096 <fann_cascadetrain_on_data>, <fann_cascadetrain_on_file>
3097
3098 This function appears in FANN >= 2.0.0.
3099 */
3100 void cascadetrain_on_file(const std::string &filename, unsigned int max_neurons,
3101 unsigned int neurons_between_reports, float desired_error)
3102 {
3103 if (ann != NULL)
3104 {
3105 fann_cascadetrain_on_file(ann, filename.c_str(),
3106 max_neurons, neurons_between_reports, desired_error);
3107 }
3108 }
3109
3110 /* Method: get_cascade_output_change_fraction
3111
3112 The cascade output change fraction is a number between 0 and 1 determining how large a fraction
3113 the <get_MSE> value should change within <get_cascade_output_stagnation_epochs> during
3114 training of the output connections, in order for the training not to stagnate. If the training
3115 stagnates, the training of the output connections will be ended and new candidates will be prepared.
3116
3117 This means:
3118 If the MSE does not change by a fraction of <get_cascade_output_change_fraction> during a
3119 period of <get_cascade_output_stagnation_epochs>, the training of the output connections
3120 is stopped because the training has stagnated.
3121
3122 If the cascade output change fraction is low, the output connections will be trained more and if the
3123 fraction is high they will be trained less.
3124
           The default cascade output change fraction is 0.01, which is equivalent to a 1% change in MSE.
3126
3127 See also:
3128 <set_cascade_output_change_fraction>, <get_MSE>,
3129 <get_cascade_output_stagnation_epochs>, <fann_get_cascade_output_change_fraction>
3130
3131 This function appears in FANN >= 2.0.0.
3132 */
3133 float get_cascade_output_change_fraction()
3134 {
3135 float change_fraction = 0.0f;
3136 if (ann != NULL)
3137 {
3138 change_fraction = fann_get_cascade_output_change_fraction(ann);
3139 }
3140 return change_fraction;
3141 }
3142
3143 /* Method: set_cascade_output_change_fraction
3144
3145 Sets the cascade output change fraction.
3146
3147 See also:
3148 <get_cascade_output_change_fraction>, <fann_set_cascade_output_change_fraction>
3149
3150 This function appears in FANN >= 2.0.0.
3151 */
3152 void set_cascade_output_change_fraction(float cascade_output_change_fraction)
3153 {
3154 if (ann != NULL)
3155 {
3156 fann_set_cascade_output_change_fraction(ann, cascade_output_change_fraction);
3157 }
3158 }
3159
3160 /* Method: get_cascade_output_stagnation_epochs
3161
3162 The number of cascade output stagnation epochs determines the number of epochs training is allowed to
3163 continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>.
3164
3165 See more info about this parameter in <get_cascade_output_change_fraction>.
3166
3167 The default number of cascade output stagnation epochs is 12.
3168
3169 See also:
3170 <set_cascade_output_stagnation_epochs>, <get_cascade_output_change_fraction>,
3171 <fann_get_cascade_output_stagnation_epochs>
3172
3173 This function appears in FANN >= 2.0.0.
3174 */
3175 unsigned int get_cascade_output_stagnation_epochs()
3176 {
3177 unsigned int stagnation_epochs = 0;
3178 if (ann != NULL)
3179 {
3180 stagnation_epochs = fann_get_cascade_output_stagnation_epochs(ann);
3181 }
3182 return stagnation_epochs;
3183 }
3184
3185 /* Method: set_cascade_output_stagnation_epochs
3186
3187 Sets the number of cascade output stagnation epochs.
3188
3189 See also:
3190 <get_cascade_output_stagnation_epochs>, <fann_set_cascade_output_stagnation_epochs>
3191
3192 This function appears in FANN >= 2.0.0.
3193 */
3194 void set_cascade_output_stagnation_epochs(unsigned int cascade_output_stagnation_epochs)
3195 {
3196 if (ann != NULL)
3197 {
3198 fann_set_cascade_output_stagnation_epochs(ann, cascade_output_stagnation_epochs);
3199 }
3200 }
3201
3202 /* Method: get_cascade_candidate_change_fraction
3203
3204 The cascade candidate change fraction is a number between 0 and 1 determining how large a fraction
3205 the <get_MSE> value should change within <get_cascade_candidate_stagnation_epochs> during
3206 training of the candidate neurons, in order for the training not to stagnate. If the training
3207 stagnates, the training of the candidate neurons will be ended and the best candidate will be selected.
3208
3209 This means:
3210 If the MSE does not change by a fraction of <get_cascade_candidate_change_fraction> during a
3211 period of <get_cascade_candidate_stagnation_epochs>, the training of the candidate neurons
3212 is stopped because the training has stagnated.
3213
3214 If the cascade candidate change fraction is low, the candidate neurons will be trained more and if the
3215 fraction is high they will be trained less.
3216
           The default cascade candidate change fraction is 0.01, which is equivalent to a 1% change in MSE.
3218
3219 See also:
3220 <set_cascade_candidate_change_fraction>, <get_MSE>,
3221 <get_cascade_candidate_stagnation_epochs>, <fann_get_cascade_candidate_change_fraction>
3222
3223 This function appears in FANN >= 2.0.0.
3224 */
3225 float get_cascade_candidate_change_fraction()
3226 {
3227 float change_fraction = 0.0f;
3228 if (ann != NULL)
3229 {
3230 change_fraction = fann_get_cascade_candidate_change_fraction(ann);
3231 }
3232 return change_fraction;
3233 }
3234
3235 /* Method: set_cascade_candidate_change_fraction
3236
3237 Sets the cascade candidate change fraction.
3238
3239 See also:
3240 <get_cascade_candidate_change_fraction>,
3241 <fann_set_cascade_candidate_change_fraction>
3242
3243 This function appears in FANN >= 2.0.0.
3244 */
3245 void set_cascade_candidate_change_fraction(float cascade_candidate_change_fraction)
3246 {
3247 if (ann != NULL)
3248 {
3249 fann_set_cascade_candidate_change_fraction(ann, cascade_candidate_change_fraction);
3250 }
3251 }
3252
3253 /* Method: get_cascade_candidate_stagnation_epochs
3254
3255 The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
3256 continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>.
3257
3258 See more info about this parameter in <get_cascade_candidate_change_fraction>.
3259
3260 The default number of cascade candidate stagnation epochs is 12.
3261
3262 See also:
3263 <set_cascade_candidate_stagnation_epochs>, <get_cascade_candidate_change_fraction>,
3264 <fann_get_cascade_candidate_stagnation_epochs>
3265
3266 This function appears in FANN >= 2.0.0.
3267 */
3268 unsigned int get_cascade_candidate_stagnation_epochs()
3269 {
3270 unsigned int stagnation_epochs = 0;
3271 if (ann != NULL)
3272 {
3273 stagnation_epochs = fann_get_cascade_candidate_stagnation_epochs(ann);
3274 }
3275 return stagnation_epochs;
3276 }
3277
3278 /* Method: set_cascade_candidate_stagnation_epochs
3279
3280 Sets the number of cascade candidate stagnation epochs.
3281
3282 See also:
3283 <get_cascade_candidate_stagnation_epochs>,
3284 <fann_set_cascade_candidate_stagnation_epochs>
3285
3286 This function appears in FANN >= 2.0.0.
3287 */
3288 void set_cascade_candidate_stagnation_epochs(unsigned int cascade_candidate_stagnation_epochs)
3289 {
3290 if (ann != NULL)
3291 {
3292 fann_set_cascade_candidate_stagnation_epochs(ann, cascade_candidate_stagnation_epochs);
3293 }
3294 }
3295
3296 /* Method: get_cascade_weight_multiplier
3297
3298 The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
3299 before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
3300 to make the training a bit less aggressive.
3301
3302 The default weight multiplier is 0.4
3303
3304 See also:
3305 <set_cascade_weight_multiplier>, <fann_get_cascade_weight_multiplier>
3306
3307 This function appears in FANN >= 2.0.0.
3308 */
3309 fann_type get_cascade_weight_multiplier()
3310 {
3311 fann_type weight_multiplier = 0;
3312 if (ann != NULL)
3313 {
3314 weight_multiplier = fann_get_cascade_weight_multiplier(ann);
3315 }
3316 return weight_multiplier;
3317 }
3318
3319 /* Method: set_cascade_weight_multiplier
3320
3321 Sets the weight multiplier.
3322
3323 See also:
3324 <get_cascade_weight_multiplier>, <fann_set_cascade_weight_multiplier>
3325
3326 This function appears in FANN >= 2.0.0.
3327 */
3328 void set_cascade_weight_multiplier(fann_type cascade_weight_multiplier)
3329 {
3330 if (ann != NULL)
3331 {
3332 fann_set_cascade_weight_multiplier(ann, cascade_weight_multiplier);
3333 }
3334 }
3335
3336 /* Method: get_cascade_candidate_limit
3337
3338 The candidate limit is a limit for how much the candidate neuron may be trained.
3339 The limit is a limit on the proportion between the MSE and candidate score.
3340
3341 Set this to a lower value to avoid overfitting and to a higher if overfitting is
3342 not a problem.
3343
3344 The default candidate limit is 1000.0
3345
3346 See also:
3347 <set_cascade_candidate_limit>, <fann_get_cascade_candidate_limit>
3348
3349 This function appears in FANN >= 2.0.0.
3350 */
3351 fann_type get_cascade_candidate_limit()
3352 {
3353 fann_type candidate_limit = 0;
3354 if (ann != NULL)
3355 {
3356 candidate_limit = fann_get_cascade_candidate_limit(ann);
3357 }
3358 return candidate_limit;
3359 }
3360
3361 /* Method: set_cascade_candidate_limit
3362
3363 Sets the candidate limit.
3364
3365 See also:
3366 <get_cascade_candidate_limit>, <fann_set_cascade_candidate_limit>
3367
3368 This function appears in FANN >= 2.0.0.
3369 */
3370 void set_cascade_candidate_limit(fann_type cascade_candidate_limit)
3371 {
3372 if (ann != NULL)
3373 {
3374 fann_set_cascade_candidate_limit(ann, cascade_candidate_limit);
3375 }
3376 }
3377
3378 /* Method: get_cascade_max_out_epochs
3379
3380 The maximum out epochs determines the maximum number of epochs the output connections
3381 may be trained after adding a new candidate neuron.
3382
3383 The default max out epochs is 150
3384
3385 See also:
3386 <set_cascade_max_out_epochs>, <fann_get_cascade_max_out_epochs>
3387
3388 This function appears in FANN >= 2.0.0.
3389 */
3390 unsigned int get_cascade_max_out_epochs()
3391 {
3392 unsigned int max_out_epochs = 0;
3393 if (ann != NULL)
3394 {
3395 max_out_epochs = fann_get_cascade_max_out_epochs(ann);
3396 }
3397 return max_out_epochs;
3398 }
3399
3400 /* Method: set_cascade_max_out_epochs
3401
3402 Sets the maximum out epochs.
3403
3404 See also:
3405 <get_cascade_max_out_epochs>, <fann_set_cascade_max_out_epochs>
3406
3407 This function appears in FANN >= 2.0.0.
3408 */
3409 void set_cascade_max_out_epochs(unsigned int cascade_max_out_epochs)
3410 {
3411 if (ann != NULL)
3412 {
3413 fann_set_cascade_max_out_epochs(ann, cascade_max_out_epochs);
3414 }
3415 }
3416
3417 /* Method: get_cascade_max_cand_epochs
3418
3419 The maximum candidate epochs determines the maximum number of epochs the input
3420 connections to the candidates may be trained before adding a new candidate neuron.
3421
3422 The default max candidate epochs is 150
3423
3424 See also:
3425 <set_cascade_max_cand_epochs>, <fann_get_cascade_max_cand_epochs>
3426
3427 This function appears in FANN >= 2.0.0.
3428 */
3429 unsigned int get_cascade_max_cand_epochs()
3430 {
3431 unsigned int max_cand_epochs = 0;
3432 if (ann != NULL)
3433 {
3434 max_cand_epochs = fann_get_cascade_max_cand_epochs(ann);
3435 }
3436 return max_cand_epochs;
3437 }
3438
3439 /* Method: set_cascade_max_cand_epochs
3440
3441 Sets the max candidate epochs.
3442
3443 See also:
3444 <get_cascade_max_cand_epochs>, <fann_set_cascade_max_cand_epochs>
3445
3446 This function appears in FANN >= 2.0.0.
3447 */
3448 void set_cascade_max_cand_epochs(unsigned int cascade_max_cand_epochs)
3449 {
3450 if (ann != NULL)
3451 {
3452 fann_set_cascade_max_cand_epochs(ann, cascade_max_cand_epochs);
3453 }
3454 }
3455
3456 /* Method: get_cascade_num_candidates
3457
3458 The number of candidates used during training (calculated by multiplying <get_cascade_activation_functions_count>,
3459 <get_cascade_activation_steepnesses_count> and <get_cascade_num_candidate_groups>).
3460
3461 The actual candidates is defined by the <get_cascade_activation_functions> and
3462 <get_cascade_activation_steepnesses> arrays. These arrays define the activation functions
3463 and activation steepnesses used for the candidate neurons. If there are 2 activation functions
3464 in the activation function array and 3 steepnesses in the steepness array, then there will be
3465 2x3=6 different candidates which will be trained. These 6 different candidates can be copied into
3466 several candidate groups, where the only difference between these groups is the initial weights.
3467 If the number of groups is set to 2, then the number of candidate neurons will be 2x3x2=12. The
3468 number of candidate groups is defined by <set_cascade_num_candidate_groups>.
3469
3470 The default number of candidates is 6x4x2 = 48
3471
3472 See also:
3473 <get_cascade_activation_functions>, <get_cascade_activation_functions_count>,
3474 <get_cascade_activation_steepnesses>, <get_cascade_activation_steepnesses_count>,
3475 <get_cascade_num_candidate_groups>, <fann_get_cascade_num_candidates>
3476
3477 This function appears in FANN >= 2.0.0.
3478 */
3479 unsigned int get_cascade_num_candidates()
3480 {
3481 unsigned int num_candidates = 0;
3482 if (ann != NULL)
3483 {
3484 num_candidates = fann_get_cascade_num_candidates(ann);
3485 }
3486 return num_candidates;
3487 }
3488
3489 /* Method: get_cascade_activation_functions_count
3490
3491 The number of activation functions in the <get_cascade_activation_functions> array.
3492
3493 The default number of activation functions is 6.
3494
3495 See also:
3496 <get_cascade_activation_functions>, <set_cascade_activation_functions>,
3497 <fann_get_cascade_activation_functions_count>
3498
3499 This function appears in FANN >= 2.0.0.
3500 */
3501 unsigned int get_cascade_activation_functions_count()
3502 {
3503 unsigned int activation_functions_count = 0;
3504 if (ann != NULL)
3505 {
3506 activation_functions_count = fann_get_cascade_activation_functions_count(ann);
3507 }
3508 return activation_functions_count;
3509 }
3510
3511 /* Method: get_cascade_activation_functions
3512
3513 The cascade activation functions array is an array of the different activation functions used by
3514 the candidates.
3515
3516 See <get_cascade_num_candidates> for a description of which candidate neurons will be
3517 generated by this array.
3518
3519 See also:
3520 <get_cascade_activation_functions_count>, <set_cascade_activation_functions>,
3521 <FANN::activation_function_enum>
3522
3523 This function appears in FANN >= 2.0.0.
3524 */
3525 activation_function_enum * get_cascade_activation_functions()
3526 {
3527 enum fann_activationfunc_enum *activation_functions = NULL;
3528 if (ann != NULL)
3529 {
3530 activation_functions = fann_get_cascade_activation_functions(ann);
3531 }
3532 return reinterpret_cast<activation_function_enum *>(activation_functions);
3533 }
3534
3535 /* Method: set_cascade_activation_functions
3536
3537 Sets the array of cascade candidate activation functions. The array must be just as long
3538 as defined by the count.
3539
3540 See <get_cascade_num_candidates> for a description of which candidate neurons will be
3541 generated by this array.
3542
3543 See also:
3544 <get_cascade_activation_steepnesses_count>, <get_cascade_activation_steepnesses>,
3545 <fann_set_cascade_activation_functions>
3546
3547 This function appears in FANN >= 2.0.0.
3548 */
3549 void set_cascade_activation_functions(activation_function_enum *cascade_activation_functions,
3550 unsigned int cascade_activation_functions_count)
3551 {
3552 if (ann != NULL)
3553 {
3554 fann_set_cascade_activation_functions(ann,
3555 reinterpret_cast<enum fann_activationfunc_enum *>(cascade_activation_functions),
3556 cascade_activation_functions_count);
3557 }
3558 }
3559
3560 /* Method: get_cascade_activation_steepnesses_count
3561
           The number of activation steepnesses in the <get_cascade_activation_steepnesses> array.
3563
3564 The default number of activation steepnesses is 4.
3565
3566 See also:
3567 <get_cascade_activation_steepnesses>, <set_cascade_activation_functions>,
3568 <fann_get_cascade_activation_steepnesses_count>
3569
3570 This function appears in FANN >= 2.0.0.
3571 */
3572 unsigned int get_cascade_activation_steepnesses_count()
3573 {
3574 unsigned int activation_steepness_count = 0;
3575 if (ann != NULL)
3576 {
3577 activation_steepness_count = fann_get_cascade_activation_steepnesses_count(ann);
3578 }
3579 return activation_steepness_count;
3580 }
3581
3582 /* Method: get_cascade_activation_steepnesses
3583
           The cascade activation steepnesses array is an array of the different activation
           steepnesses used by the candidates.
3586
3587 See <get_cascade_num_candidates> for a description of which candidate neurons will be
3588 generated by this array.
3589
3590 The default activation steepnesses is {0.25, 0.50, 0.75, 1.00}
3591
3592 See also:
3593 <set_cascade_activation_steepnesses>, <get_cascade_activation_steepnesses_count>,
3594 <fann_get_cascade_activation_steepnesses>
3595
3596 This function appears in FANN >= 2.0.0.
3597 */
3598 fann_type *get_cascade_activation_steepnesses()
3599 {
3600 fann_type *activation_steepnesses = NULL;
3601 if (ann != NULL)
3602 {
3603 activation_steepnesses = fann_get_cascade_activation_steepnesses(ann);
3604 }
3605 return activation_steepnesses;
3606 }
3607
3608 /* Method: set_cascade_activation_steepnesses
3609
3610 Sets the array of cascade candidate activation steepnesses. The array must be just as long
3611 as defined by the count.
3612
3613 See <get_cascade_num_candidates> for a description of which candidate neurons will be
3614 generated by this array.
3615
3616 See also:
3617 <get_cascade_activation_steepnesses>, <get_cascade_activation_steepnesses_count>,
3618 <fann_set_cascade_activation_steepnesses>
3619
3620 This function appears in FANN >= 2.0.0.
3621 */
3622 void set_cascade_activation_steepnesses(fann_type *cascade_activation_steepnesses,
3623 unsigned int cascade_activation_steepnesses_count)
3624 {
3625 if (ann != NULL)
3626 {
3627 fann_set_cascade_activation_steepnesses(ann,
3628 cascade_activation_steepnesses, cascade_activation_steepnesses_count);
3629 }
3630 }
3631
3632 /* Method: get_cascade_num_candidate_groups
3633
3634 The number of candidate groups is the number of groups of identical candidates which will be used
3635 during training.
3636
3637 This number can be used to have more candidates without having to define new parameters for the candidates.
3638
3639 See <get_cascade_num_candidates> for a description of which candidate neurons will be
3640 generated by this parameter.
3641
3642 The default number of candidate groups is 2
3643
3644 See also:
3645 <set_cascade_num_candidate_groups>, <fann_get_cascade_num_candidate_groups>
3646
3647 This function appears in FANN >= 2.0.0.
3648 */
3649 unsigned int get_cascade_num_candidate_groups()
3650 {
3651 unsigned int num_candidate_groups = 0;
3652 if (ann != NULL)
3653 {
3654 num_candidate_groups = fann_get_cascade_num_candidate_groups(ann);
3655 }
3656 return num_candidate_groups;
3657 }
3658
3659 /* Method: set_cascade_num_candidate_groups
3660
3661 Sets the number of candidate groups.
3662
3663 See also:
3664 <get_cascade_num_candidate_groups>, <fann_set_cascade_num_candidate_groups>
3665
3666 This function appears in FANN >= 2.0.0.
3667 */
3668 void set_cascade_num_candidate_groups(unsigned int cascade_num_candidate_groups)
3669 {
3670 if (ann != NULL)
3671 {
3672 fann_set_cascade_num_candidate_groups(ann, cascade_num_candidate_groups);
3673 }
3674 }
3675
3676 /*********************************************************************/
3677
3678#ifndef FIXEDFANN
3679 /* Method: scale_train
3680
3681 Scale input and output data based on previously calculated parameters.
3682
3683 See also:
3684 <descale_train>, <fann_scale_train>
3685
3686 This function appears in FANN >= 2.1.0.
3687 */
3688 void scale_train(training_data &data)
3689 {
3690 if (ann != NULL)
3691 {
3692 fann_scale_train(ann, data.train_data);
3693 }
3694 }
3695
3696 /* Method: descale_train
3697
3698 Descale input and output data based on previously calculated parameters.
3699
3700 See also:
3701 <scale_train>, <fann_descale_train>
3702
3703 This function appears in FANN >= 2.1.0.
3704 */
3705 void descale_train(training_data &data)
3706 {
3707 if (ann != NULL)
3708 {
3709 fann_descale_train(ann, data.train_data);
3710 }
3711 }
3712
3713 /* Method: set_input_scaling_params
3714
3715 Calculate scaling parameters for future use based on training data.
3716
3717 See also:
3718 <set_output_scaling_params>, <fann_set_input_scaling_params>
3719
3720 This function appears in FANN >= 2.1.0.
3721 */
3722 bool set_input_scaling_params(const training_data &data, float new_input_min, float new_input_max)
3723 {
3724 bool status = false;
3725 if (ann != NULL)
3726 {
3727 status = (fann_set_input_scaling_params(ann, data.train_data, new_input_min, new_input_max) != -1);
3728 }
3729 return status;
3730 }
3731
3732 /* Method: set_output_scaling_params
3733
3734 Calculate scaling parameters for future use based on training data.
3735
3736 See also:
3737 <set_input_scaling_params>, <fann_set_output_scaling_params>
3738
3739 This function appears in FANN >= 2.1.0.
3740 */
3741 bool set_output_scaling_params(const training_data &data, float new_output_min, float new_output_max)
3742 {
3743 bool status = false;
3744 if (ann != NULL)
3745 {
3746 status = (fann_set_output_scaling_params(ann, data.train_data, new_output_min, new_output_max) != -1);
3747 }
3748 return status;
3749 }
3750
3751 /* Method: set_scaling_params
3752
3753 Calculate scaling parameters for future use based on training data.
3754
3755 See also:
3756 <clear_scaling_params>, <fann_set_scaling_params>
3757
3758 This function appears in FANN >= 2.1.0.
3759 */
3760 bool set_scaling_params(const training_data &data,
3761 float new_input_min, float new_input_max, float new_output_min, float new_output_max)
3762 {
3763 bool status = false;
3764 if (ann != NULL)
3765 {
3766 status = (fann_set_scaling_params(ann, data.train_data,
3767 new_input_min, new_input_max, new_output_min, new_output_max) != -1);
3768 }
3769 return status;
3770 }
3771
3772 /* Method: clear_scaling_params
3773
3774 Clears scaling parameters.
3775
3776 See also:
3777 <set_scaling_params>, <fann_clear_scaling_params>
3778
3779 This function appears in FANN >= 2.1.0.
3780 */
3781 bool clear_scaling_params()
3782 {
3783 bool status = false;
3784 if (ann != NULL)
3785 {
3786 status = (fann_clear_scaling_params(ann) != -1);
3787 }
3788 return status;
3789 }
3790
3791 /* Method: scale_input
3792
3793 Scale data in input vector before feed it to ann based on previously calculated parameters.
3794
3795 See also:
3796 <descale_input>, <scale_output>, <fann_scale_input>
3797
3798 This function appears in FANN >= 2.1.0.
3799 */
3800 void scale_input(fann_type *input_vector)
3801 {
3802 if (ann != NULL)
3803 {
3804 fann_scale_input(ann, input_vector );
3805 }
3806 }
3807
3808 /* Method: scale_output
3809
3810 Scale data in output vector before feed it to ann based on previously calculated parameters.
3811
3812 See also:
3813 <descale_output>, <scale_input>, <fann_scale_output>
3814
3815 This function appears in FANN >= 2.1.0.
3816 */
3817 void scale_output(fann_type *output_vector)
3818 {
3819 if (ann != NULL)
3820 {
3821 fann_scale_output(ann, output_vector );
3822 }
3823 }
3824
3825 /* Method: descale_input
3826
3827 Scale data in input vector after get it from ann based on previously calculated parameters.
3828
3829 See also:
3830 <scale_input>, <descale_output>, <fann_descale_input>
3831
3832 This function appears in FANN >= 2.1.0.
3833 */
3834 void descale_input(fann_type *input_vector)
3835 {
3836 if (ann != NULL)
3837 {
3838 fann_descale_input(ann, input_vector );
3839 }
3840 }
3841
3842 /* Method: descale_output
3843
3844 Scale data in output vector after get it from ann based on previously calculated parameters.
3845
3846 See also:
3847 <scale_output>, <descale_input>, <fann_descale_output>
3848
3849 This function appears in FANN >= 2.1.0.
3850 */
3851 void descale_output(fann_type *output_vector)
3852 {
3853 if (ann != NULL)
3854 {
3855 fann_descale_output(ann, output_vector );
3856 }
3857 }
3858
3859#endif /* FIXEDFANN */
3860
3861 /*********************************************************************/
3862
3863 /* Method: set_error_log
3864
3865 Change where errors are logged to.
3866
3867 If log_file is NULL, no errors will be printed.
3868
3869 If neural_net is empty i.e. ann is NULL, the default log will be set.
3870 The default log is the log used when creating a neural_net.
3871 This default log will also be the default for all new structs
3872 that are created.
3873
3874 The default behavior is to log them to stderr.
3875
3876 See also:
3877 <struct fann_error>, <fann_set_error_log>
3878
3879 This function appears in FANN >= 1.1.0.
3880 */
3881 void set_error_log(FILE *log_file)
3882 {
3883 fann_set_error_log(reinterpret_cast<struct fann_error *>(ann), log_file);
3884 }
3885
3886 /* Method: get_errno
3887
3888 Returns the last error number.
3889
3890 See also:
3891 <fann_errno_enum>, <fann_reset_errno>, <fann_get_errno>
3892
3893 This function appears in FANN >= 1.1.0.
3894 */
3895 unsigned int get_errno()
3896 {
3897 return fann_get_errno(reinterpret_cast<struct fann_error *>(ann));
3898 }
3899
3900 /* Method: reset_errno
3901
3902 Resets the last error number.
3903
3904 This function appears in FANN >= 1.1.0.
3905 */
3906 void reset_errno()
3907 {
3908 fann_reset_errno(reinterpret_cast<struct fann_error *>(ann));
3909 }
3910
3911 /* Method: reset_errstr
3912
3913 Resets the last error string.
3914
3915 This function appears in FANN >= 1.1.0.
3916 */
3917 void reset_errstr()
3918 {
3919 fann_reset_errstr(reinterpret_cast<struct fann_error *>(ann));
3920 }
3921
3922 /* Method: get_errstr
3923
3924 Returns the last errstr.
3925
3926 This function calls <fann_reset_errno> and <fann_reset_errstr>
3927
3928 This function appears in FANN >= 1.1.0.
3929 */
3930 std::string get_errstr()
3931 {
3932 return std::string(fann_get_errstr(reinterpret_cast<struct fann_error *>(ann)));
3933 }
3934
3935 /* Method: print_error
3936
3937 Prints the last error to stderr.
3938
3939 This function appears in FANN >= 1.1.0.
3940 */
3941 void print_error()
3942 {
3943 fann_print_error(reinterpret_cast<struct fann_error *>(ann));
3944 }
3945
3946 /*********************************************************************/
3947
3948 private:
        // Structure used by set_callback to hold information about a user callback.
        // An instance is stored as the fann user data pointer and unpacked by
        // internal_callback to dispatch into the user's C++ callback.
        typedef struct user_context_type
        {
            callback_type user_callback; // Pointer to user callback function
            void *user_data; // Arbitrary data pointer passed to the callback
            neural_net *net; // This pointer for the neural network
        } user_context;
3956
3957 // Internal callback used to convert from pointers to class references
3958 static int FANN_API internal_callback(struct fann *ann, struct fann_train_data *train,
3959 unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, unsigned int epochs)
3960 {
3961 user_context *user_data = static_cast<user_context *>(fann_get_user_data(ann));
3962 if (user_data != NULL)
3963 {
3965 data.train_data = train;
3966
3967 int result = (*user_data->user_callback)(*user_data->net,
3968 data, max_epochs, epochs_between_reports, desired_error, epochs, user_data);
3969
3970 data.train_data = NULL; // Prevent automatic cleanup
3971 return result;
3972 }
3973 else
3974 {
3975 return -1; // This should not occur except if out of memory
3976 }
3977 }
3978 protected:
3979 // Pointer the encapsulated fann neural net structure
3980 struct fann *ann;
3981 };
3982
3983 /*************************************************************************/
3984};
3985
3986#endif /* FANN_CPP_H_INCLUDED */