
RNN, LSTM, and GRU in Practice: IMDB Sentiment Analysis

Posted: 2023-03-18 15:54:09


Day 40

Contents:
- RNN: Cell mode / Layer mode
- LSTM: Cell mode / Layer mode
- GRU: Cell mode / Layer mode


RNN

Cell mode

import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
# keep only the most frequent words
total_words = 10000
max_review_len = 80
embedding_len = 100

(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# x_train: [b, 80]
# x_test:  [b, 80]
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):

    def __init__(self, units):
        super(MyRNN, self).__init__()
        # initial hidden state for each cell: [b, 64]
        self.state0 = [tf.zeros([batchsz, units])]
        self.state1 = [tf.zeros([batchsz, units])]
        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        # two stacked SimpleRNN cells
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)
        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x) or net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training:
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # rnn cell compute: [b, 80, 100] => [b, 64]
        state0 = self.state0
        state1 = self.state1
        for word in tf.unstack(x, axis=1):  # word: [b, 100]
            # h1 = x @ w_xh + h0 @ w_hh
            # out0: [b, 64]
            out0, state0 = self.rnn_cell0(word, state0, training=training)
            # out1: [b, 64]
            out1, state1 = self.rnn_cell1(out0, state1, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(out1)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
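To make the manual unstack loop concrete, here is a minimal single-step sketch (my addition, not from the original post; batch size 2, feature dim 3, and 4 units are illustrative values): a SimpleRNNCell takes one timestep plus a one-element state list and returns the new output together with the updated state list.

import tensorflow as tf
from tensorflow.keras import layers

cell = layers.SimpleRNNCell(4)   # 4 hidden units
x_t = tf.random.normal([2, 3])   # one timestep: [batch, feature]
state = [tf.zeros([2, 4])]       # SimpleRNNCell keeps a single state tensor
out, state = cell(x_t, state)    # out: [2, 4]; state: updated [h]
print(out.shape)                 # (2, 4)

This is exactly what the for loop above does 80 times per layer, feeding the first cell's output into the second.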

Layer mode

import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
# keep only the most frequent words
total_words = 10000
max_review_len = 80
embedding_len = 100

(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# x_train: [b, 80]
# x_test:  [b, 80]
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):

    def __init__(self, units):
        super(MyRNN, self).__init__()
        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        self.rnn = keras.Sequential([
            layers.SimpleRNN(units, dropout=0.5, return_sequences=True, unroll=True),
            layers.SimpleRNN(units, dropout=0.5, unroll=True)
        ])
        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x) or net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training:
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # rnn layer compute: [b, 80, 100] => [b, 64]
        x = self.rnn(x, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(x)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
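One detail worth noting in the Layer version: only the first SimpleRNN sets return_sequences=True, because the second layer needs the full output sequence while the classifier only needs the final step. A quick shape check with dummy tensors (my addition, not from the original post):

import tensorflow as tf
from tensorflow.keras import layers

seq = tf.random.normal([2, 80, 100])                           # stand-in for an embedded batch
print(layers.SimpleRNN(64, return_sequences=True)(seq).shape)  # (2, 80, 64): all timesteps
print(layers.SimpleRNN(64)(seq).shape)                         # (2, 64): last timestep only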

LSTM

Cell mode

import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(1111)
np.random.seed(2222)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
total_words = 10000
max_review_len = 80
embedding_len = 100

(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# x_train, x_test: [b, 80]
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):

    def __init__(self, units):
        super(MyRNN, self).__init__()
        # each LSTM cell needs two state tensors, [h, c], each [b, 64]
        self.state0 = [tf.zeros([batchsz, units]), tf.zeros([batchsz, units])]
        self.state1 = [tf.zeros([batchsz, units]), tf.zeros([batchsz, units])]
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100] => h_dim: 64
        # self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.2)
        # self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.2)
        self.rnn_cell0 = layers.LSTMCell(units, dropout=0.2)
        self.rnn_cell1 = layers.LSTMCell(units, dropout=0.2)
        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # [b, 80, 100] => [b, 64]
        state0 = self.state0
        state1 = self.state1
        for word in tf.unstack(x, axis=1):  # word: [b, 100]
            # h1 = x @ w_xh + h0 @ w_hh
            out0, state0 = self.rnn_cell0(word, state0, training=training)
            out1, state1 = self.rnn_cell1(out0, state1, training=training)
        x = self.outlayer(out1)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
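The structural change from the SimpleRNN version is the state: an LSTMCell carries two tensors per layer, the hidden state h and the cell state c, which is why state0 and state1 are now two-element lists. A minimal sketch (my addition, not from the original post; shapes are illustrative):

import tensorflow as tf
from tensorflow.keras import layers

cell = layers.LSTMCell(4)
state = [tf.zeros([2, 4]), tf.zeros([2, 4])]        # [h, c]
out, state = cell(tf.random.normal([2, 3]), state)  # out is the new h
print(out.shape, len(state))                        # (2, 4) 2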

Layer mode

import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
# keep only the most frequent words
total_words = 10000
max_review_len = 80
embedding_len = 100

(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# x_train: [b, 80]
# x_test:  [b, 80]
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):

    def __init__(self, units):
        super(MyRNN, self).__init__()
        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        self.rnn = keras.Sequential([
            layers.LSTM(units, dropout=0.5, return_sequences=True, unroll=True),
            layers.LSTM(units, dropout=0.5, unroll=True)
        ])
        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x) or net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training:
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # rnn layer compute: [b, 80, 100] => [b, 64]
        x = self.rnn(x, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(x)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()

GRU

Cell mode

import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
# keep only the most frequent words
total_words = 10000
max_review_len = 80
embedding_len = 100

(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# x_train: [b, 80]
# x_test:  [b, 80]
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):

    def __init__(self, units):
        super(MyRNN, self).__init__()
        # initial hidden state for each cell: [b, 64]
        self.state0 = [tf.zeros([batchsz, units])]
        self.state1 = [tf.zeros([batchsz, units])]
        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        # two stacked GRU cells
        self.rnn_cell0 = layers.GRUCell(units, dropout=0.5)
        self.rnn_cell1 = layers.GRUCell(units, dropout=0.5)
        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x) or net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training:
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # rnn cell compute: [b, 80, 100] => [b, 64]
        state0 = self.state0
        state1 = self.state1
        for word in tf.unstack(x, axis=1):  # word: [b, 100]
            # h1 = x @ w_xh + h0 @ w_hh
            # out0: [b, 64]
            out0, state0 = self.rnn_cell0(word, state0, training=training)
            # out1: [b, 64]
            out1, state1 = self.rnn_cell1(out0, state1, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(out1)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
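Note that GRUCell returns to a single state tensor per layer, like SimpleRNNCell, since the GRU folds the LSTM's separate cell state into its hidden state; only the gated update rule differs. A minimal sketch (my addition, not from the original post; shapes are illustrative):

import tensorflow as tf
from tensorflow.keras import layers

cell = layers.GRUCell(4)
state = [tf.zeros([2, 4])]                          # single state tensor again
out, state = cell(tf.random.normal([2, 3]), state)
print(out.shape)                                    # (2, 4)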

Layer mode

import os

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 64
# keep only the most frequent words
total_words = 10000
max_review_len = 80
embedding_len = 100

(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# x_train: [b, 80]
# x_test:  [b, 80]
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):

    def __init__(self, units):
        super(MyRNN, self).__init__()
        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        self.rnn = keras.Sequential([
            layers.GRU(units, dropout=0.5, return_sequences=True, unroll=True),
            layers.GRU(units, dropout=0.5, unroll=True)
        ])
        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x) or net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training:
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # rnn layer compute: [b, 80, 100] => [b, 64]
        x = self.rnn(x, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(x)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
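To close the loop, here is a hedged inference sketch (my addition, not from the original post): encoding a raw review with Keras' IMDB word index and scoring it with a trained Layer-mode model. Keras offsets each word's frequency rank by 3, reserving 0 for padding, 1 for the start token, and 2 for out-of-vocabulary words. The encode helper and the example sentence are hypothetical illustrations; also note the Cell-mode variants above hard-code batchsz into their initial states, so single-example prediction only works with the Layer-mode models.

# Assumes `model`, `total_words`, and `max_review_len` from the script above,
# with `model` already trained. `encode` is a hypothetical helper.
word_index = keras.datasets.imdb.get_word_index()

def encode(review):
    ids = [1]                                         # start token, as in load_data
    for w in review.lower().split():
        i = word_index.get(w, -1) + 3                 # frequency rank + index offset
        ids.append(i if 3 <= i < total_words else 2)  # 2 = out-of-vocabulary
    return keras.preprocessing.sequence.pad_sequences([ids], maxlen=max_review_len)

x = encode("this movie was wonderful and moving")    # [1, 80]
prob = model.predict(x)                              # [1, 1] sigmoid output
print(float(prob[0, 0]))                             # > 0.5 reads as positive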

