/**
******************************************************************************
* @file bpNeuralNetwork2.c
* @author 古么宁
* @brief 两层隐含层 bp 神经网络实现
* 文件参考了网络上不少博文,如 https://www.cnblogs.com/mthoutai/p/7047144.html
* <注:此文件用到变长数组,要C99以上或GNUC版本编译器支持>
******************************************************************************
*
* COPYRIGHT(c) 2018 GoodMorning
*
******************************************************************************
*/
/* Includes ---------------------------------------------------*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "bpNeuralNetwork2.h"
/* Private types ------------------------------------------------------------*/
/* Private macro ------------------------------------------------------------*/
//激活函数
#define Sigmod(x) (1 / (1 + exp(-1.0 * (x))))
/* Private variables ------------------------------------------------------------*/
/* Global variables ------------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/* Gorgeous Split-line -----------------------------------------------*/
/*
 * @brief bpnn_init
 * Lay out the network's arrays inside the single contiguous buffer
 * that starts at bpnn->min_in, then zero the whole region.
 *
 * @param bpnn : the network whose memory is being initialized
 */
void bpnn_init(bpnn_t * bpnn)
{
	double * p = (double *) bpnn->min_in;

	/* carve the buffer in order: min_in | max_in | min_out | max_out |
	 * ly1Wi[netin][ly1nn] | ly1Wo[ly1nn][ly2nn] | ly2Wo[ly2nn][netout] */
	p += bpnn->netin;                 bpnn->max_in  = (void *)p;
	p += bpnn->netin;                 bpnn->min_out = (void *)p;
	p += bpnn->netout;                bpnn->max_out = (void *)p;
	p += bpnn->netout;                bpnn->ly1Wi   = (void *)p;
	p += bpnn->netin * bpnn->ly1nn;   bpnn->ly1Wo   = (void *)p;
	p += bpnn->ly1nn * bpnn->ly2nn;   bpnn->ly2Wo   = (void *)p;

	/* total element count of everything carved above */
	uint32_t total = 2 * bpnn->netin
	               + 2 * bpnn->netout
	               + bpnn->netin * bpnn->ly1nn
	               + bpnn->ly1nn * bpnn->ly2nn
	               + bpnn->ly2nn * bpnn->netout;

	memset(bpnn->min_in , 0 , sizeof(double) * total);
}
//Forward pass of one already-normalized input vector through the network.
//samplein : normalized input vector, samplein[netin]
//ly1res / ly2res : receive the hidden layer activations (backprop needs them)
//out : receives the final network output vector, out[netout]
static void forward_transfer(bpnn_t * bpnn ,double *samplein ,double * ly1res,double * ly2res , double *out)
{
	double (*ly1Wi)[bpnn->ly1nn]  = bpnn->ly1Wi;
	double (*ly1Wo)[bpnn->ly2nn]  = bpnn->ly1Wo;
	double (*ly2Wo)[bpnn->netout] = bpnn->ly2Wo;
	double accum[bpnn->netout];   /* raw (pre-activation) network output */
	double net;                   /* weighted input of the current neuron */

	memset(accum, 0, sizeof(double) * bpnn->netout);

	/* hidden layer 1: weighted sum of the inputs, then sigmoid */
	for (uint32_t n1 = 0; n1 < bpnn->ly1nn; ++n1)
	{
		net = 0;
		for (uint32_t in = 0; in < bpnn->netin; ++in)
			net += ly1Wi[in][n1] * samplein[in];
		ly1res[n1] = Sigmod(net);
	}

	/* hidden layer 2: weighted sum of layer 1 outputs, sigmoid, then add
	 * this neuron's weighted contribution to the raw network output */
	for (uint32_t n2 = 0; n2 < bpnn->ly2nn; ++n2)
	{
		net = 0.0;
		for (uint32_t n1 = 0; n1 < bpnn->ly1nn; ++n1)
			net += ly1Wo[n1][n2] * ly1res[n1];
		ly2res[n2] = Sigmod(net);
		for (uint32_t o = 0; o < bpnn->netout; ++o)
			accum[o] += ly2res[n2] * ly2Wo[n2][o];
	}

	/* output layer: squash the accumulated sums */
	for (uint32_t o = 0; o < bpnn->netout; ++o)
		out[o] = Sigmod(accum[o]);
}
/*
 * @brief back_propagation
 * Adjust every weight in the network from one sample's input and the
 * network output produced for it (gradient descent with a momentum
 * term: each update blends the previous delta with the new gradient).
 *
 * @param bpnn  : network being trained
 * @param delta : previous weight updates (momentum state), same layout as bpnn
 * @param trcfg : training parameters — per-layer momentum (…A) and
 *                learning-rate (…B) factors
 *
 * @param samplein : one normalized sample input, samplein[netin];
 *                   a sample has netin inputs
 *
 * @param sampleout : the sample's expected output, sampleout[netout];
 *                    a sample has netout outputs
 *
 * @param ly1res : hidden-layer-1 activations from forward_transfer
 * @param ly2res : hidden-layer-2 activations from forward_transfer
 * @param out    : network output from forward_transfer for this sample
 */
static void back_propagation (bpnn_t * bpnn,
	bpnn_t * delta,
	bpnntr_t * trcfg ,
	double * samplein ,
	double * sampleout ,
	double * ly1res,
	double * ly2res,
	double * out )
{
	double (*ly1Wi)[bpnn->ly1nn] = bpnn->ly1Wi ;
	double (*ly1Wo)[bpnn->ly2nn] = bpnn->ly1Wo ;
	double (*ly2Wo)[bpnn->netout] = bpnn->ly2Wo ;
	double (*ly1dWi)[delta->ly1nn] = delta->ly1Wi ;
	double (*ly1dWo)[delta->ly2nn] = delta->ly1Wo ;
	double (*ly2dWo)[delta->netout] = delta->ly2Wo ;
	double netout_d[bpnn->netout]; //error signal of each output node
	double layer1_d[bpnn->ly1nn];  //error signal of hidden layer 1
	double layer2_d[bpnn->ly2nn];  //error signal of hidden layer 2
	double neuron_delta;           //error signal of one specific neuron
	memset(layer1_d,0,sizeof(double)*bpnn->ly1nn);
	memset(layer2_d,0,sizeof(double)*bpnn->ly2nn);
	//output-node error signal: f'(x)*delta() = f(x)(1-f(x))*delta()
	for (uint32_t i = 0 ; i < bpnn->netout ; ++i) // δ = (d - o)o(1-o)
	netout_d[i] = (out[i] - sampleout[i]) * out[i] * (1 - out[i]);
	//hidden-layer error signals
	//NOTE(review): layer2_d here is only Σ(Wjk*δk); the f'(net2) factor is
	//applied later, at weight-update time. But layer1_d is accumulated from
	//this bare layer2_d, so the layer-2 activation derivative never reaches
	//the layer-1 gradient — standard backprop would include it. Presumably a
	//bug in the gradient math; confirm before changing training behavior.
	for (uint32_t ly2n = 0; ly2n < bpnn->ly2nn; ++ly2n)
	{
	for (uint32_t i = 0 ; i < bpnn->netout ; ++i)
	layer2_d[ly2n] += ly2Wo[ly2n][i] * netout_d[i];// Σ(Wjk * δ)
	for (uint32_t ly1n = 0; ly1n < bpnn->ly1nn; ++ly1n)
	layer1_d[ly1n] += layer2_d[ly2n] * ly1Wo[ly1n][ly2n];
	}
	//use the output error signals to adjust layer-2 → output weights
	//(momentum form: dW = A*dW_prev + B*gradient, then W -= dW)
	for (uint32_t ly2n = 0; ly2n < bpnn->ly2nn; ++ly2n)
	{
	for (uint32_t i = 0 ; i < bpnn->netout ; ++i)
	{
	ly2dWo[ly2n][i] = trcfg->ly2WoA * ly2dWo[ly2n][i] +
	trcfg->ly2WoB * ly2res[ly2n] * netout_d[i] ; // ΔWjk = η(d - o)o(1-o)y
	ly2Wo[ly2n][i] -= ly2dWo[ly2n][i];
	}
	}
	//use the layer-2 error signals to adjust layer-1 → layer-2 weights
	for (uint32_t ly1n = 0; ly1n < bpnn->ly1nn; ++ly1n)
	{
	for (uint32_t ly2n = 0; ly2n < bpnn->ly2nn; ++ly2n)
	{
	neuron_delta = layer2_d[ly2n] * ly2res[ly2n] * (1 - ly2res[ly2n]) * ly1res[ly1n];
	ly1dWo[ly1n][ly2n] = trcfg->ly1WoA * ly1dWo[ly1n][ly2n] +
	trcfg->ly1WoB * neuron_delta;
	ly1Wo[ly1n][ly2n] -= ly1dWo[ly1n][ly2n];
	}
	}
	//use the layer-1 error signals to adjust input → layer-1 weights
	for (uint32_t ly1n = 0; ly1n < bpnn->ly1nn; ++ly1n)
	{
	for (uint32_t in = 0; in < bpnn->netin; ++in)
	{
	neuron_delta = layer1_d[ly1n] * ly1res[ly1n] * (1 - ly1res[ly1n]) * samplein[in];
	ly1dWi[in][ly1n] = trcfg->ly1WiA * ly1dWi[in][ly1n] +
	trcfg->ly1WiB * neuron_delta;
	ly1Wi[in][ly1n] -= ly1dWi[in][ly1n];
	}
	}
}
/*
 * @brief bpneuralnetwork_config
 * Prepare the network for training: record per-dimension min/max of the
 * samples, normalize the samples in place, and randomize all weights.
 *
 * @param bpnn : the network being configured
 *
 * @param sample_in : sample inputs, laid out as samplein[size][netin];
 *                    each sample has netin inputs — normalized in place
 *
 * @param sample_out : sample outputs, laid out as sampleout[size][netout];
 *                     each sample has netout outputs — normalized in place
 *
 * @param size : number of samples
 */
static void bpneuralnetwork_config(bpnn_t * bpnn , void * sample_in, void * sample_out, uint32_t size)
{
	double * min_in  = bpnn->min_in ;  // min_in[netin]
	double * max_in  = bpnn->max_in ;  // max_in[netin]
	double * min_out = bpnn->min_out;  // min_out[netout]
	double * max_out = bpnn->max_out;  // max_out[netout]
	double (*ly1Wi)[bpnn->ly1nn]  = bpnn->ly1Wi ;
	double (*ly1Wo)[bpnn->ly2nn]  = bpnn->ly1Wo ;
	double (*ly2Wo)[bpnn->netout] = bpnn->ly2Wo ;
	double (*samplein) [bpnn->netin]  = sample_in ;
	double (*sampleout)[bpnn->netout] = sample_out;

	/* seed the per-dimension ranges with the first sample ... */
	for (uint32_t dim = 0; dim < bpnn->netin ; ++dim)
	{
		min_in[dim] = samplein[0][dim];
		max_in[dim] = samplein[0][dim];
	}
	for (uint32_t dim = 0; dim < bpnn->netout ; ++dim)
	{
		min_out[dim] = sampleout[0][dim];
		max_out[dim] = sampleout[0][dim];
	}

	/* ... then widen them with every remaining sample */
	for (uint32_t smp = 1; smp < size; ++smp)
	{
		for (uint32_t dim = 0; dim < bpnn->netin ; ++dim)
		{
			double v = samplein[smp][dim];
			if (v < min_in[dim])
				min_in[dim] = v;
			if (v > max_in[dim])
				max_in[dim] = v;
		}
		for (uint32_t dim = 0; dim < bpnn->netout ; ++dim)
		{
			double v = sampleout[smp][dim];
			if (v < min_out[dim])
				min_out[dim] = v;
			if (v > max_out[dim])
				max_out[dim] = v;
		}
	}

	/* normalize every dimension of every sample into (0,1]:
	 * x' = (x - min + 1) / (max - min + 1) */
	for (uint32_t smp = 0; smp < size; ++smp)
	{
		for (uint32_t dim = 0; dim < bpnn->netin; ++dim)
		{
			double num = samplein[smp][dim] - min_in[dim] + 1;
			double den = max_in[dim] - min_in[dim] + 1;
			samplein[smp][dim] = num / den;
		}
		for (uint32_t dim = 0; dim < bpnn->netout; ++dim)
		{
			double num = sampleout[smp][dim] - min_out[dim] + 1;
			double den = max_out[dim] - min_out[dim] + 1;
			sampleout[smp][dim] = num / den;
		}
	}

	/* randomize all weights into [-1, 1] */
	//input weights of hidden layer 1
	for (uint32_t in = 0; in < bpnn->netin; ++in)
	{
		for (uint32_t n1 = 0; n1 < bpnn->ly1nn; ++n1)
			ly1Wi[in][n1] = rand() * 2.0 / RAND_MAX - 1;
	}
	//layer-1 output weights double as layer-2 input weights
	for (uint32_t n1 = 0; n1 < bpnn->ly1nn; ++n1)
	{
		for (uint32_t n2 = 0; n2 < bpnn->ly2nn; ++n2)
			ly1Wo[n1][n2] = rand() * 2.0 / RAND_MAX - 1;
	}
	//output weights of hidden layer 2
	for (uint32_t n2 = 0; n2 < bpnn->ly2nn; ++n2)
	{
		for (uint32_t o = 0; o < bpnn->netout; ++o)
			ly2Wo[n2][o] = rand() * 2.0 / RAND_MAX - 1;
	}
}
/*
 * @brief bpnn_train
 * Train a bp neural network from a set of samples.
 *
 * @param bpnn  : network to train (its buffer must already be attached;
 *                see BPNN_MALLOC / bpnn_init)
 *
 * @param trcfg : training configuration: per-layer learning rates and
 *                momentum factors, max iteration count (maxt) and the
 *                target error sum (limit)
 *
 * @param sample_in : sample inputs, laid out as samplein[size][netin];
 *                    each sample has netin inputs — normalized IN PLACE
 *
 * @param sample_out : sample outputs, laid out as sampleout[size][netout];
 *                     each sample has netout outputs — normalized IN PLACE
 *
 * @param size : number of samples
 */
void bpnn_train(bpnn_t * bpnn , bpnntr_t * trcfg , void * sample_in, void * sample_out, uint32_t size)
{
	double errsum ;
	uint32_t trtimes = 0;
	uint32_t cnt = 0;
	double (*samplein) [bpnn->netin] = sample_in ;
	double (*sampleout)[bpnn->netout] = sample_out;
	double ly1out[bpnn->ly1nn];      //activations of hidden layer 1
	double ly2out[bpnn->ly2nn];      //activations of hidden layer 2
	double netout[bpnn->netout];     //network output for one sample
	double netoutwidth[bpnn->netout];//de-normalization span of each output

	if (NULL == bpnn->ly1Wi)
	{
		printf("Uninitialized bp Neural Network\r\n");
		return ;
	}

	//scratch network holding the previous weight updates (momentum state)
	BPNN_MALLOC(delta,bpnn->netin,bpnn->netout,bpnn->ly1nn,bpnn->ly2nn);
	if (NULL == delta.min_in)
	{
		printf("not enough memory to train bp Neural Network\r\n");
		return ;
	}

	bpnn_init(bpnn);  //lay out and zero the network's buffer
	bpnn_init(&delta);//lay out and zero the momentum buffer

	//normalize the samples in place and randomize the network weights
	bpneuralnetwork_config( bpnn , sample_in , sample_out , size );

	double * max_out = bpnn->max_out;
	double * min_out = bpnn->min_out;
	for (uint32_t i = 0 ; i < bpnn->netout ; ++i)
		netoutwidth[i] = max_out[i] - min_out[i];

	do
	{
		errsum = 0.0;
		for (uint32_t smp = 0; smp < size; ++smp)
		{
			//forward pass: fills ly1out, ly2out and the network output
			forward_transfer(bpnn , &samplein[smp][0],ly1out,ly2out,netout);
			//backward pass: compare netout with the sample, adjust the net
			back_propagation(bpnn ,&delta ,trcfg ,&samplein[smp][0] ,&sampleout[smp][0],ly1out,ly2out,netout);
			//accumulate the absolute error, scaled back toward real units
			for (uint32_t i = 0 ; i < bpnn->netout ; ++i)
				errsum += fabs((netout[i] - sampleout[smp][i]) * netoutwidth[i]);
		}
		if (++cnt > 999) //progress report every 1000 epochs
		{
			cnt = 0;
			//%u: trtimes is uint32_t (passing it to %d is undefined behavior)
			printf("%u train ,error sum :%f\r\n",trtimes,errsum);
		}
	}
	while (++trtimes < trcfg->maxt && errsum > trcfg->limit);

	printf("\rtrain %u times,error sum :%f\r\n",trtimes , errsum);

	BPNN_FREE(&delta);
}
/*
 * @brief bpnn_calculate
 * Run a trained network on one raw (un-normalized) input vector.
 * in  : raw input, in[netin]
 * out : receives the de-normalized network output, out[netout]
 */
void bpnn_calculate(bpnn_t * bpnn , double * in , double * out)
{
	double * min_in  = bpnn->min_in;
	double * max_in  = bpnn->max_in;
	double * min_out = bpnn->min_out;
	double * max_out = bpnn->max_out;
	double scaled[bpnn->netin];   //normalized copy of the input
	double result[bpnn->netout];  //raw network output in (0,1]
	double h1[bpnn->ly1nn];       //hidden layer 1 activations
	double h2[bpnn->ly2nn];       //hidden layer 2 activations

	//normalize with the same ranges recorded during training
	for (uint32_t i = 0; i < bpnn->netin ; ++i)
		scaled[i] = (in[i] - min_in[i] + 1) / (max_in[i] - min_in[i] + 1);

	forward_transfer(bpnn , scaled , h1 , h2 , result);

	//map the (0,1] network output back to real units
	for (uint32_t i = 0; i < bpnn->netout ; ++i)
		out[i] = result[i] * (max_out[i] - min_out[i] + 1) + min_out[i] - 1;
}