import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Reshape, BatchNormalization, concatenate, Input
from keras.layers import Conv2D, MaxPooling2D, SeparableConv2D, ZeroPadding2D, Conv1D, MaxPooling1D
from keras.layers import LSTM, Bidirectional, TimeDistributed, RepeatVector, GRU
from keras.optimizers import Adam, SGD, Nadam
from keras.layers import LeakyReLU
from keras.regularizers import l1, l2
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
import scipy.io as sco
import os
# Restrict TensorFlow/Keras to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
# Randomly sample the original data to generate the training and test sets.
def sample(TrainRate, Data, Label):
    n, m = np.shape(Data)          # n: number of bands, m: number of pixels
    DataIndex = list(range(m))     # pool of column indices not yet drawn
    TrainData = []
    TrainLabel = []
    TestData = []
    TestLabel = []
    # Draw TrainRate*m pixels without replacement for the training set.
    for i in range(int(TrainRate * m)):
        Randindex = int(np.random.uniform(0, len(DataIndex)))
        col = DataIndex[Randindex]
        TrainData.append(Data[:, col])
        TrainLabel.append(Label[:, col])
        del DataIndex[Randindex]
    # The remaining pixels form the test set.
    for i in DataIndex:
        TestData.append(Data[:, i])
        TestLabel.append(Label[:, i])
    TrainLabel = np.array(TrainLabel)
    TestLabel = np.array(TestLabel)
    TrainData = np.array(TrainData)
    TestData = np.array(TestData)
    return TrainData, TrainLabel, TestData, TestLabel
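# Example call (the shapes are illustrative, not taken from the project's data files):
# with Data of shape (157, 20000) -- 157 bands by 20000 pixels -- and Label of shape
# (6, 20000), sample(0.8, Data, Label) returns TrainData of shape (16000, 157) and
# TrainLabel of shape (16000, 6). The rows are pixel spectra, so they must still be
# reshaped to (n_samples, 157, 1) before being fed to the Input(shape=(157, 1))
# models defined below.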
# 10-fold split of the mixed data set. The first 10000 columns are assumed to be
# Jasper pixels and the remaining columns Urban pixels (the same layout used by
# mix_sample below); fold `iter` (1..10) is held out as the test set.
def tenfold(Data, Label, iter):
    n, m = np.shape(Data)
    Data = list(np.transpose(Data))
    Label = list(np.transpose(Label))
    m_jr = 10000            # total number of Jasper pixels
    m_ur = m - m_jr         # total number of Urban pixels
    jr_fold = m_jr // 10    # Jasper pixels per fold
    ur_fold = m_ur // 10    # Urban pixels per fold
    # Test fold: one tenth of Jasper plus one tenth of Urban (Urban starts at column m_jr).
    TestData = (Data[(iter - 1) * jr_fold : iter * jr_fold]
                + Data[m_jr + (iter - 1) * ur_fold : m_jr + iter * ur_fold])
    TestLabel = (Label[(iter - 1) * jr_fold : iter * jr_fold]
                 + Label[m_jr + (iter - 1) * ur_fold : m_jr + iter * ur_fold])
    # Training set: everything outside the two held-out slices.
    TrainData = (Data[: (iter - 1) * jr_fold]
                 + Data[iter * jr_fold : m_jr + (iter - 1) * ur_fold]
                 + Data[m_jr + iter * ur_fold :])
    TrainLabel = (Label[: (iter - 1) * jr_fold]
                  + Label[iter * jr_fold : m_jr + (iter - 1) * ur_fold]
                  + Label[m_jr + iter * ur_fold :])
    TrainLabel = np.array(TrainLabel)
    TrainData = np.array(TrainData)
    TestData = np.array(TestData)
    TestLabel = np.array(TestLabel)
    return TrainData, TrainLabel, TestData, TestLabel
# Stratified random sampling of the mixed data set: draw TrainRate of the Jasper
# pixels and TrainRate of the Urban pixels separately, so that both scenes are
# represented in the training and test sets.
def mix_sample(TrainRate, Data, Label):
    n, m = np.shape(Data)
    m_jr = 10000        # number of Jasper pixels (columns 0 .. m_jr-1)
    m_ur = m - m_jr     # number of Urban pixels (columns m_jr .. m-1)
    # Randomly sample the Jasper pixels.
    DataIndex = list(range(m_jr))
    TrainData = []
    TrainLabel = []
    TestData = []
    TestLabel = []
    for i in range(int(TrainRate * m_jr)):
        Randindex = int(np.random.uniform(0, len(DataIndex)))
        col = DataIndex[Randindex]
        TrainData.append(Data[:, col])
        TrainLabel.append(Label[:, col])
        del DataIndex[Randindex]
    for i in DataIndex:
        TestData.append(Data[:, i])
        TestLabel.append(Label[:, i])
    # Randomly sample the Urban pixels (columns offset by m_jr).
    DataIndex = list(range(m_ur))
    for i in range(int(TrainRate * m_ur)):
        Randindex = int(np.random.uniform(0, len(DataIndex)))
        col = DataIndex[Randindex]
        TrainData.append(Data[:, col + m_jr])
        TrainLabel.append(Label[:, col + m_jr])
        del DataIndex[Randindex]
    for i in DataIndex:
        TestData.append(Data[:, i + m_jr])
        TestLabel.append(Label[:, i + m_jr])
    TrainLabel = np.array(TrainLabel)
    TestLabel = np.array(TestLabel)
    TrainData = np.array(TrainData)
    TestData = np.array(TestData)
    return TrainData, TrainLabel, TestData, TestLabel
# Two-branch network without the chained classifier: a stacked bidirectional LSTM
# branch and a 1-D CNN branch share the spectral input, and their features are
# concatenated before the dense abundance head (6-way softmax).
def combat_rnn_without_cc(objective, optimizer, metrics):
    X = Input(shape=(157, 1))
    # Recurrent branch: three stacked bidirectional LSTMs over the 157 bands.
    lstm1 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.2))(X)
    lstm1 = BatchNormalization()(lstm1)
    lstm2 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.3))(lstm1)
    lstm2 = BatchNormalization()(lstm2)
    lstm3 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.4))(lstm2)
    lstm3 = BatchNormalization()(lstm3)
    X2 = Flatten()(lstm3)
    # Convolutional branch: four Conv1D/MaxPooling1D stages over the same input.
    Global = Conv1D(3, 5, activation='relu', padding='same')(X)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Conv1D(6, 4, activation='relu')(Global)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Conv1D(12, 5, activation='relu')(Global)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Conv1D(24, 4, activation='relu')(Global)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Flatten()(Global)
    # Fuse the two branches and regress the six abundances.
    Con = concatenate([X2, Global])
    Den1 = Dense(600, activation='relu', use_bias=False)(Con)
    Global = Dense(150, activation='relu', use_bias=False)(Den1)
    Abadunce = Dense(6, activation='softmax')(Global)
    model = Model(inputs=X, outputs=Abadunce)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
# Two-branch network with the chained classifier: the fused features are passed to
# an LSTM encoder-decoder that emits the six abundances one endmember at a time, so
# each prediction can condition on the previous ones. Output shape is (6, 1).
def combat_rnn_with_cc(objective, optimizer, metrics):
    X = Input(shape=(157, 1))
    lstm1 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.2))(X)
    lstm1 = BatchNormalization()(lstm1)
    lstm2 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.3))(lstm1)
    lstm2 = BatchNormalization()(lstm2)
    lstm3 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.4))(lstm2)
    lstm3 = BatchNormalization()(lstm3)
    X2 = Flatten()(lstm3)
    Global = Conv1D(3, 5, activation='relu', padding='same')(X)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Conv1D(6, 4, activation='relu')(Global)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Conv1D(12, 5, activation='relu')(Global)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Conv1D(24, 4, activation='relu')(Global)
    Global = MaxPooling1D(pool_size=2)(Global)
    Global = Flatten()(Global)
    Con = concatenate([X2, Global])
    Den1 = Dense(600, activation='relu', use_bias=False)(Con)
    Global = Dense(150, activation='relu', use_bias=False)(Den1)
    # Chained classifier: encode the fused feature vector, repeat it six times and
    # decode one sigmoid abundance per endmember.
    Abadunce = Reshape((1, 150))(Global)
    Abadunce = Bidirectional(LSTM(100, dropout=0.2))(Abadunce)
    Abadunce = BatchNormalization()(Abadunce)
    Abadunce = RepeatVector(6)(Abadunce)
    Abadunce = Bidirectional(LSTM(50, return_sequences=True, kernel_regularizer=l2(0.0001), dropout=0.2))(Abadunce)
    Abadunce = BatchNormalization()(Abadunce)
    Abadunce = Bidirectional(LSTM(20, return_sequences=True, kernel_regularizer=l2(0.0001)))(Abadunce)
    Abadunce = TimeDistributed(Dense(1, activation='sigmoid'))(Abadunce)
    model = Model(inputs=X, outputs=Abadunce)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
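# Note on target shapes (not part of the original code, inferred from the output
# layers): the *_with_cc models end in TimeDistributed(Dense(1)) over six steps and
# therefore expect targets of shape (n_samples, 6, 1), e.g. TrainLabel.reshape(-1, 6, 1),
# while the *_without_cc models expect plain (n_samples, 6) abundance vectors.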
# Recurrent-only network without the chained classifier: the stacked bidirectional
# LSTMs feed the dense abundance head directly (no convolutional branch).
def pixel_rnn_without_cc(objective, optimizer, metrics):
    X = Input(shape=(157, 1))
    lstm1 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.2))(X)
    lstm1 = BatchNormalization()(lstm1)
    lstm2 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.3))(lstm1)
    lstm2 = BatchNormalization()(lstm2)
    lstm3 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.4))(lstm2)
    lstm3 = BatchNormalization()(lstm3)
    X2 = Flatten()(lstm3)
    Den1 = Dense(600, activation='relu', use_bias=False)(X2)
    Global = Dense(150, activation='relu', use_bias=False)(Den1)
    Abadunce = Dense(6, activation='softmax')(Global)
    model = Model(inputs=X, outputs=Abadunce)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
# Recurrent-only network with the chained classifier. The source was cut off partway
# through this function; the decoder tail below is assumed to mirror the
# chained-classifier tail of combat_rnn_with_cc above.
def pixel_rnn_with_cc(objective, optimizer, metrics):
    X = Input(shape=(157, 1))
    lstm1 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.2))(X)
    lstm1 = BatchNormalization()(lstm1)
    lstm2 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.3))(lstm1)
    lstm2 = BatchNormalization()(lstm2)
    lstm3 = Bidirectional(LSTM(10, return_sequences=True, dropout=0.4))(lstm2)
    lstm3 = BatchNormalization()(lstm3)
    X2 = Flatten()(lstm3)
    Den1 = Dense(600, activation='relu', use_bias=False)(X2)
    Global = Dense(150, activation='relu', use_bias=False)(Den1)
    Abadunce = Reshape((1, 150))(Global)
    Abadunce = Bidirectional(LSTM(100, dropout=0.2))(Abadunce)
    Abadunce = BatchNormalization()(Abadunce)
    Abadunce = RepeatVector(6)(Abadunce)
    Abadunce = Bidirectional(LSTM(50, return_sequences=True, kernel_regularizer=l2(0.0001), dropout=0.2))(Abadunce)
    Abadunce = BatchNormalization()(Abadunce)
    Abadunce = Bidirectional(LSTM(20, return_sequences=True, kernel_regularizer=l2(0.0001)))(Abadunce)
    Abadunce = TimeDistributed(Dense(1, activation='sigmoid'))(Abadunce)
    model = Model(inputs=X, outputs=Abadunce)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
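# A minimal end-to-end sketch of how these pieces are likely meant to be wired
# together. The .mat file name and the variable keys 'Data'/'Label' are hypothetical
# -- adjust them to the data files that ship with the project.
if __name__ == '__main__':
    mat = sco.loadmat('mixed_data.mat')   # hypothetical file name
    Data = mat['Data']                    # hypothetical key: (157, n_pixels) spectra
    Label = mat['Label']                  # hypothetical key: (6, n_pixels) abundances
    TrainData, TrainLabel, TestData, TestLabel = mix_sample(0.8, Data, Label)
    # The models expect (n_samples, 157, 1) inputs; the chained-classifier models
    # expect (n_samples, 6, 1) targets (see the note after combat_rnn_with_cc).
    TrainData = TrainData.reshape(-1, 157, 1)
    TestData = TestData.reshape(-1, 157, 1)
    TrainLabel = TrainLabel.reshape(-1, 6, 1)
    TestLabel = TestLabel.reshape(-1, 6, 1)
    model = combat_rnn_with_cc('mse', Adam(lr=0.001), ['mae'])
    callbacks = [EarlyStopping(monitor='val_loss', patience=10),
                 ModelCheckpoint('combat_rnn_with_cc.h5', save_best_only=True)]
    model.fit(TrainData, TrainLabel, batch_size=128, epochs=100,
              validation_data=(TestData, TestLabel), callbacks=callbacks)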