ICode9

精准搜索请尝试: 精确搜索
首页 > 其他分享> 文章详细

基于逐步法思想的多元线性回归(改进)

2022-01-16 13:00:38  阅读:166  来源: 互联网

标签:index 基于 Exam print 多元 np Train 线性 Data


学期结束,稍微完善下之前的程序,新增,对原始数据进行归一化处理,以及模型预测值和真实值的比对图示。

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import f as F
from sklearn.model_selection import train_test_split

# training design matrix (gets an intercept column prepended in Data_Process)
Train_Data = []
# held-out test design matrix
Test_Data = []
# response vectors: train / test targets and model predictions
Y_Train = []
Y_Test = []
Y_Pred = []
# C = pinv(Train_Data.T.dot(Train_Data)), reused by the partial F tests
C = []
# partial F statistics collected during forward selection
F_Exam = []
# partial F statistics collected during backward elimination
F_Exam_Back = []
# upper quantile of the F distribution; the same threshold is used as
# F-in and F-out (Fin = Fout)
quant = 0
# column indices of variables judged irrelevant and removed
irrel_Train_Data = []
# fitted regression coefficients
weight = []
# training-set dimensions (rows, columns incl. intercept)
n_samples,n_features =0,0


def Data_Process(Source_Data_Path):
    """Load the Excel dataset, z-score the features, split train/test,
    prepend an intercept column and cache the Gram pseudo-inverse.

    Side effects: rebinds the module globals Train_Data, Test_Data,
    Y_Train, Y_Test, C, n_samples and n_features.

    NOTE(review): standardization statistics are computed on the FULL
    dataset before the split — possible train/test leakage; confirm
    this is intended.
    """
    global Train_Data,Y_Train,C,n_samples,n_features,Test_Data,Y_Test
    raw = np.array(pd.read_excel(Source_Data_Path))
    # The last column is the response; everything else is a feature.
    Y_Train = raw[:, -1]
    Train_Data = raw[:, 0:-1]
    print('original data:\n', Train_Data.shape, Y_Train.shape)
    # Z-score normalization using the sample standard deviation (ddof=1).
    Col_Mean = np.mean(Train_Data, axis=0)
    print(Col_Mean.shape)
    col_var = np.std(Train_Data, axis=0, ddof=1)
    Train_Data = Train_Data - Col_Mean
    Train_Data /= col_var
    # 95% train / 5% test with a fixed seed for reproducibility.
    Train_Data, Test_Data, Y_Train, Y_Test = train_test_split(
        Train_Data, Y_Train, train_size=0.95, random_state=1)
    print('train set:\n', Train_Data.shape)
    print('test set:\n', Test_Data.shape)
    # Prepend a column of ones so weight[0] plays the intercept role.
    ones_train = np.array([1] * Train_Data.shape[0])
    ones_test = np.array([1] * Test_Data.shape[0])
    Train_Data = np.array(np.insert(Train_Data, 0, values=ones_train, axis=1), dtype=np.float64)
    Test_Data = np.array(np.insert(Test_Data, 0, values=ones_test, axis=1), dtype=np.float64)
    n_samples, n_features = Train_Data.shape
    # Pseudo-inverse of X'X, consumed by the partial F tests.
    C = np.linalg.pinv(Train_Data.T.dot(Train_Data))


def Linear_Regress():
    """Stepwise multiple linear regression on the global training data.

    Alternates forward selection (add the next column, partial-F test it)
    with backward elimination (re-test every variable already in the
    model). Discarded column indices are recorded in irrel_Train_Data and
    their final weights are zero-filled so len(weight) matches the
    original column count. Mutates the module globals Train_Data, C,
    n_samples, n_features, weight and irrel_Train_Data.
    """
    global n_features,Train_Data,n_samples,C,irrel_Train_Data,weight
    index = 1
    # stepwise regression: grow the model one column at a time
    while index < n_features:
        print('Train_Data variations:\n',Train_Data.shape)
        num_Train_Data = index +1
        Train_Data_default = Train_Data[:,0:num_Train_Data]
        print('Train_Data_default',Train_Data_default.shape)
        # least-squares fit via pseudo-inverse on the current column subset
        weight = np.linalg.pinv(Train_Data_default.T.dot(Train_Data_default)).dot(Train_Data_default.T).dot(Y_Train)
        # output the current coefficient vector
        print('the ',index,' weight:\n',weight)
        # forward step: is the newly added column significant?
        print('******************************************向前迭代******************************************')
        if 1-F_Examination(Get_Q(weight,num_Train_Data),weight,index,'Forward'):
             # not significant: drop the column and restart from scratch
             index = 1
             n_features -= 1
             Train_Data = np.delete(Train_Data,num_Train_Data-1,1)
             if n_features>1:
                 # reset cached shapes and the Gram pseudo-inverse
                 n_samples,n_features = Train_Data.shape
                 C = np.linalg.pinv(Train_Data.T.dot(Train_Data))
                 irrel_Train_Data.append(num_Train_Data-1)
                 continue
             else:
                 print('因变量与所有自变量线性无关!')
                 break
        # backward step: re-test every variable already in the model
        elif len(F_Exam)>=2:
            print('******************************************向后迭代******************************************')
            F_Exam_Back.clear()
            for num in range (1,len(F_Exam)):
                print('向后法对第 ',num,' 个变量做偏F检验')
                # partial F examination for the num-th parameter
                F_Examination(Get_Q(weight,num_Train_Data),weight,num,'Back')
            min_index = F_Exam_Back.index(min(F_Exam_Back))
            if min(F_Exam_Back) <= quant:
                print('the MIN index of F_Exam_Back:\n',min_index)
                # reset and restart the stepwise loop from the first column
                index = 1
                # delete the least significant variable
                n_features -= 1
                Train_Data = np.delete(Train_Data,min_index+1,1)
                irrel_Train_Data.append(min_index+1)
                if n_features>1:
                    # reset cached shapes and the Gram pseudo-inverse
                    n_samples,n_features = Train_Data.shape
                    C = np.linalg.pinv(Train_Data.T.dot(Train_Data))
                    F_Exam.clear()
                    print('删除变量后从第一个变量重新开始逐步回归算法!')
                    continue
                else:
                    print('因变量与所有自变量线性无关!')
                    break
            else:
                print('F_Exam_Back最小值索引:\n',min_index)
                print('所有变量对Y_Train的影响效果显著!')
            index +=1
        # nothing to eliminate yet: advance to the next candidate column
        else:
            index +=1
    # zero-fill the weights of the discarded variables so the final
    # coefficient vector lines up with the original columns
    for j in range(len(irrel_Train_Data)):
        weight = np.insert(weight,irrel_Train_Data[j],0)
    print('最终的拟合系数为:\n',weight)


# partial F test of one regression coefficient
def F_Examination(Q,weight,index,mark):
    """Partial F test of coefficient `index`.

    Args:
        Q: residual sum of squares of the current model (from Get_Q).
        weight: current coefficient vector.
        index: position of the tested coefficient in `weight` / `C`.
        mark: 'Forward' -> append the statistic to F_Exam and return
              True (significant) or False (not significant, statistic
              removed again); any other value -> append to F_Exam_Back
              and return None.

    Side effects: updates the module globals quant and F_Exam (or
    F_Exam_Back).
    """
    global quant,F_Exam
    # significance level (probability of rejecting a true null hypothesis)
    alpha = 0.05
    n = n_samples-n_features-2
    print('样本自由度:\n',n)
    # upper alpha-quantile of F(1, n); serves as both F-in and F-out
    quant = F.isf(q=alpha, dfn=1, dfd=n)
    print('分位数:\n',quant)
    # residual variance estimate
    sigma2 = Q/(n)
    print( 'sigma2',sigma2)
    β2 = (weight[index]) ** 2
    print('对角线元素:\n',C[index][index])
    if mark == 'Forward':
        F_Exam.append(β2/(sigma2 * C[index][index]))
        print('F_Exam:\n',F_Exam)
        if max(F_Exam) < quant :
            # not significant: discard the statistic just recorded
            del F_Exam[index-1]
            return False
        else:
            print('F_Exam大小:\n',len(F_Exam))
            return True
    else:
        F_Exam_Back.append(β2/(sigma2 * C[index][index]))
        print('F_Exam_Back:\n',F_Exam_Back)


def Get_Y():
    """Compute model predictions for the test set.

    Reads the module globals `weight` and `Test_Data` and stores the
    predicted responses in the module global `Y_Pred`.
    """
    global Y_Pred
    # Vectorized equivalent of accumulating weight[i] * Test_Data[:, i]
    # for i in range(len(weight)): only the first len(weight) columns
    # participate, exactly as in the original loop.
    w = np.asarray(weight, dtype=np.float64)
    Y_Pred = Test_Data[:, :len(w)].dot(w)


# residual sum of squares of the current model
def Get_Q(weight,num_param):
    """Return Q = ||Y_Train - X[:, :num_param] @ weight[:num_param]||^2.

    Only the first `num_param` coefficients / columns of the global
    Train_Data contribute, mirroring the column subset currently in the
    stepwise model.

    Args:
        weight: coefficient vector (at least `num_param` entries).
        num_param: number of leading columns/coefficients in the model.
    """
    # Vectorized equivalent of accumulating weight[i] * Train_Data[:, i].
    w = np.asarray(weight[:num_param], dtype=np.float64)
    residual = Y_Train - Train_Data[:, :num_param].dot(w)
    return np.inner(residual, residual)


def figure2D(image_path='D:/File/project_pca/exp_image/'):
    """Plot predicted vs. real test responses and save the figure.

    Args:
        image_path: directory prefix (with trailing slash) that the PNG
            '_Predict_vs_Real.png' is written under; defaults to the
            original hard-coded location, so existing callers keep
            working.
    """
    plt.figure(1)
    plt.plot(range(len(Y_Pred)), Y_Pred, 'b', label="Predict")
    plt.plot(range(len(Y_Test)), Y_Test, 'r', label="Real")
    plt.xlabel("Number")
    plt.ylabel("Value")
    plt.title('Predict vs Real')
    whole_image_path = image_path + '_Predict_vs_Real.png'
    plt.legend()  # render the labels passed to plot()
    # save before show(): show() may consume/clear the active figure
    plt.savefig(whole_image_path, dpi=800, bbox_inches='tight')
    plt.show()
    plt.clf()


# process control: end-to-end pipeline on the abalone dataset
if __name__ == '__main__':
    Data_Process('D:\\File\\project_pca\\data_segmentation\\abalon.xlsx')
    # stepwise regression fit
    Linear_Regress()
    # predictions on the held-out test set from the fitted model
    Get_Y()
    # plot predicted vs. real and save the figure
    figure2D()

数据集:

http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data

标签:index,基于,Exam,print,多元,np,Train,线性,Data
来源: https://blog.csdn.net/a138_a/article/details/122521666

本站声明: 1. iCode9 技术分享网(下文简称本站)提供的所有内容,仅供技术学习、探讨和分享;
2. 关于本站的所有留言、评论、转载及引用,纯属内容发起人的个人观点,与本站观点和立场无关;
3. 关于本站的所有言论和文字,纯属内容发起人的个人观点,与本站观点和立场无关;
4. 本站文章均是网友提供,不完全保证技术分享内容的完整性、准确性、时效性、风险性和版权归属;如您发现该文章侵犯了您的权益,可联系我们第一时间进行删除;
5. 本站为非盈利性的个人网站,所有内容不会用来进行牟利,也不会利用任何形式的广告来间接获益,纯粹是为了广大技术爱好者提供技术内容和技术思想的分享性交流网站。

专注分享技术,共同学习,共同进步。侵权联系[81616952@qq.com]

Copyright (C)ICode9.com, All Rights Reserved.

ICode9版权所有