0
点赞
收藏
分享

微信扫一扫

python 实现二分类(标题为LDA,但代码实际使用的是逻辑回归 LogisticRegression,并非线性判别分析)

亿奇学 2022-01-11 阅读 47
import torch
from getData import loadFeatur,loadFeatureByCorrNum,loadFeatureByNorm

from torch_geometric.data import DataLoader
from GCNmodel import GCN,GNN,createGNNdataset
import numpy as np
import random
print("\n---------Starting to load Data---------\n")
# Alternative loader kept for reference (correlation-number based features):
# giftedData,commonData,allData=loadFeatureByCorrNum()
# NOTE(review): hard-coded local data path — must be adjusted per machine.
giftedData,commonData,allData=loadFeatureByNorm(rootFile='F:/HCP_data/',kind='aparc.a2009s')
# giftedData,commonData,allData=loadFeatureByNorm(rootFile='D:/HCP1200/')
# train_dataset = allData[:300]
# test_dataset = allData[300:]
# Class-balanced split: first 100 samples of each class for training,
# the remainder of each class for testing. Each element is assumed to be
# a (features, label) pair — TODO confirm against getData loaders.
train_dataset=giftedData[:100]+commonData[:100]#list
test_dataset=giftedData[100:]+commonData[100:]

##### "LDA" section — NOTE(review): despite the heading, this trains a
##### LogisticRegression model, not linear discriminant analysis. For true
##### LDA use sklearn.discriminant_analysis.LinearDiscriminantAnalysis.
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

# Flatten each sample's feature matrix into a 9*148 = 1332-dim vector.
# BUGFIX: np.resize silently repeats/truncates data when the total size
# does not match; reshape raises a ValueError instead, so shape bugs in
# the loaded features fail loudly rather than corrupting the design matrix.
X=np.array([x[0] for x in train_dataset])
y_train=[y[1] for y in train_dataset]
x_train=X.reshape(len(train_dataset),9*148)

X_=np.array([x[0] for x in test_dataset])
y_test=[y[1] for y in test_dataset]
x_test=X_.reshape(len(test_dataset),9*148)

# Standardize features (fit on train only, then apply the same transform to
# test, avoiding leakage). Labels are not scaled: the model outputs [0,1]
# probabilities via the sigmoid, so target standardization is unnecessary.
std = StandardScaler()
x_train = std.fit_transform(x_train)
x_test = std.transform(x_test)

# Logistic regression prediction. L2 regularization by default; C=1.0 is the
# inverse regularization strength (hyperparameter, tunable).
lg = LogisticRegression(C=1.0)
lg.fit(x_train, y_train)
# Learned coefficients, e.g. [[1.12796779 0.28741414 0.66944043 ...]]
print(lg.coef_)
# Predict and report mean accuracy on the held-out set (~0.516 observed).
y_predict = lg.predict(x_test)
print("准确率:", lg.score(x_test, y_test))
举报

相关推荐

0 条评论