DL EXPERIMENTS
EXPERIMENT NO.1 A
Aim - Introduction to TensorFlow and Theano.
import tensorflow as tf
x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
print(x)
print(tf.shape(x))  # tensor shape: (2, 3)
print(x.dtype)      # element data type: float32
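The aim also mentions Theano, but the listing above covers only TensorFlow. A minimal sketch of the same idea in Theano, assuming the legacy theano package (or a compatible fork such as aesara/pytensor) is installed; the names a and double are illustrative:
import theano
import theano.tensor as T
# Declare a symbolic 2-D matrix and compile a function that doubles it.
a = T.dmatrix('a')
double = theano.function([a], a * 2)
print(double([[1., 2., 3.], [4., 5., 6.]]))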
EXPERIMENT NO.1 B
import tensorflow as tf
print("tensorflow_version", tf.version)
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)  # 10 output logits, one per digit class
])
predictions = model(x_train[:1]).numpy()
print(tf.nn.softmax(predictions).numpy())  # convert the logits to class probabilities
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
probability_model = tf.keras.Sequential([
    model,
    tf.keras.layers.Softmax()
])
probability_model(x_test[:5])
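The probability model's outputs are easiest to read as predicted digits via argmax. A minimal sketch, assuming the variables from the listing above are still in scope:
import numpy as np
# Compare the predicted digit (argmax over class probabilities) with the labels.
probs = probability_model(x_test[:5]).numpy()
print("Predicted:", np.argmax(probs, axis=1))
print("Actual:   ", y_test[:5])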
EXPERIMENT NO.2
Aim - Implementation of a multilayer perceptron
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Activation
import matplotlib.pyplot as plt
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
gray_scale = 255
x_train /= gray_scale
x_test /= gray_scale
print("Feature matrix:", x_train.shape)
print("Target matrix:", x_test.shape)
print("Feature matrix:", y_train.shape)
print("Target matrix:", y_test.shape)
# Show the first 100 training digits in a 10x10 grid
fig, ax = plt.subplots(10, 10)
k = 0
for i in range(10):
    for j in range(10):
        ax[i][j].imshow(x_train[k].reshape(28, 28), aspect='auto')
        k += 1
plt.show()
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(256, activation='sigmoid'),
    Dense(128, activation='sigmoid'),
    Dense(10, activation='softmax')  # softmax (not sigmoid) so the 10 outputs form a probability distribution
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10,
                    batch_size=2000,
                    validation_split=0.2)
results = model.evaluate(x_test, y_test, verbose=0)
print('Test loss, test accuracy:', results)
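Keras' fit returns a History object whose .history dict records the per-epoch metrics; plotting them helps spot over- or under-fitting. A minimal sketch, using the history object captured above:
# Plot training vs. validation accuracy per epoch.
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()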
EXPERIMENT NO.3
Aim - First deep learning project in Python with Keras, step by step
from numpy import loadtxt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
dataset = loadtxt('/content/drive/MyDrive/DL_LAB/EXP 3/diabetes.csv', delimiter=',', skiprows=1)
X = dataset[:,0:8]
y = dataset[:,8]
model = Sequential()
model.add(Dense(12, input_shape=(8,), activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=150, batch_size=10)
_, accuracy = model.evaluate(X, y)
print('Accuracy: %.2f' % (accuracy*100))
predictions = model.predict(X)
rounded = [round(x[0]) for x in predictions]        # round the sigmoid outputs to 0/1
predictions = (model.predict(X) > 0.5).astype(int)  # equivalent thresholding in one step
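To sanity-check the fit, a few predictions can be compared against the ground-truth labels. A minimal sketch, assuming the variables above are in scope:
# Print the first five cases: input features => predicted class (expected class).
for i in range(5):
    print('%s => %d (expected %d)' % (X[i].tolist(), predictions[i][0], y[i]))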
EXPERIMENT NO.4
Aim - Building a simple deep neural network
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Load the Fashion MNIST dataset
mnist = tf.keras.datasets.fashion_mnist
(train_img, train_label), (test_img, test_label) = mnist.load_data()
# Print training images and labels
print(train_img)
print(f"Train Label: {train_label}")
print(f"Length of Training images: {len(train_img)} \nLength of Testing Images: {len(test_img)}")
print(train_img[0])
print(f"Train Label: {train_label[0]}")
# Display an image from the training dataset
plt.imshow(train_img[100], cmap='gray') # Use gray colormap since MNIST images are grayscale
plt.show()
print(train_label[100])
train_img = train_img / 255
test_img = test_img / 255
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(520, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.optimizers.Adam(),
              loss='sparse_categorical_crossentropy')
model.fit(train_img, train_label, epochs=5)
model.summary()
model.evaluate(test_img, test_label)
classifications = model.predict(test_img)
print(classifications[69])
plt.imshow(test_img[69])
result = np.argmax(classifications[69])  # index of the highest class probability
print(f"\nOur prediction: {result}")
print(f"\nActual answer: {test_label[69]}")
EXPERIMENT NO.5
Aim - Implementation of a simple autoencoder on the MNIST dataset
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import layers
from keras.datasets import mnist
(x_train, _), (x_test, _) = mnist.load_data()
print(x_train.shape)
print(x_test.shape)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
encoded_dimensions = 32
input_image = keras.Input(shape=(x_train.shape[1],))  # 784-dimensional flattened input
encoded = layers.Dense(encoded_dimensions, activation='relu')(input_image)
decoded = layers.Dense(x_train.shape[1], activation='sigmoid')(encoded)
autoencoder = keras.Model(inputs=input_image, outputs=decoded)
encoder = keras.Model(inputs=input_image, outputs=encoded)
encoded_input = keras.Input(shape=(encoded_dimensions,))
decoder_layer = autoencoder.layers[-1]
decoder = keras.Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train, epochs=50, batch_size=256,
                shuffle=True, validation_data=(x_test, x_test))
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
n = 6  # number of digits to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # Original image on the top row.
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Reconstruction on the bottom row.
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
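A quick quantitative companion to the plot is the mean reconstruction error on the test set. A minimal sketch, assuming the arrays above are in scope:
# Mean squared reconstruction error over the whole test set.
mse = np.mean((x_test - decoded_imgs) ** 2)
print(f"Mean squared reconstruction error: {mse:.4f}")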
EXPERIMENT NO.6
Aim - Implementation of a Convolutional Neural Network on the Fashion MNIST dataset
from keras.datasets import fashion_mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
# Load and split the dataset
(trainX, trainy), (testX, testy) = fashion_mnist.load_data()
# Print dataset shapes
print('Train: X = ', trainX.shape)
print('Test: X = ', testX.shape)
# Plot sample images
for i in range(1, 10):
    plt.subplot(3, 3, i)
    plt.imshow(trainX[i], cmap=plt.get_cmap('gray'))
plt.show()
# Reshape input data to include channel dimension
trainX = np.expand_dims(trainX, -1)
testX = np.expand_dims(testX, -1)
print(trainX.shape)
# Define model architecture
def model_arch():
    models = Sequential()
    models.add(Conv2D(64, (5, 5), padding="same", activation="relu", input_shape=(28, 28, 1)))
    models.add(MaxPooling2D(pool_size=(2, 2)))
    models.add(Conv2D(128, (5, 5), padding="same", activation="relu"))
    models.add(MaxPooling2D(pool_size=(2, 2)))
    models.add(Conv2D(256, (5, 5), padding="same", activation="relu"))
    models.add(MaxPooling2D(pool_size=(2, 2)))
    models.add(Flatten())
    models.add(Dense(256, activation="relu"))
    models.add(Dense(10, activation="softmax"))
    return models
# Compile the model
model = model_arch()
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.summary()
# Train the model
history = model.fit(trainX.astype(np.float32), trainy.astype(np.float32),
                    epochs=10, validation_split=0.2)
# Plot accuracy vs epoch
plt.plot(history.history['sparse_categorical_accuracy'])
plt.plot(history.history['val_sparse_categorical_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# Plot loss vs epoch
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# Prediction
labels = ['t_shirt', 'trouser', 'pullover', 'dress', 'coat',
          'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boots']
predictions = model.predict(testX[:1])
label = labels[np.argmax(predictions)]
print(label)
plt.imshow(testX[:1][0])
plt.show()
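Beyond a single sample, a confusion matrix shows which classes the network mixes up. A minimal sketch, assuming the variables above are in scope:
import tensorflow as tf
# Predict classes for the full test set and tabulate them against the true labels.
pred_classes = np.argmax(model.predict(testX.astype(np.float32)), axis=1)
print(tf.math.confusion_matrix(testy, pred_classes).numpy())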
EXPERIMENT NO.7
Aim - Design and implementation of a CNN on the MNIST dataset
import tensorflow as tf
from tensorflow.keras import layers,models
from tensorflow import keras
import numpy as np
(X_train, y_train) , (X_test, y_test) = keras.datasets.mnist.load_data()
X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape(-1, 28, 28, 1)  # add a channel dimension to the training set
X_test = X_test.reshape(-1, 28, 28, 1)    # add a channel dimension to the test set
convolutional_neural_network = models.Sequential([
    layers.Conv2D(filters=25, kernel_size=(3, 3), activation='relu',
                  input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
convolutional_neural_network.compile(optimizer='adam',
                                     loss='sparse_categorical_crossentropy',
                                     metrics=['accuracy'])
convolutional_neural_network.fit(X_train, y_train, epochs=10)
convolutional_neural_network.evaluate(X_test, y_test)
y_predicted = convolutional_neural_network.predict(X_test)  # probability scores for each digit class
print(np.argmax(y_predicted[0]))  # predicted digit for the first test image
y_predicted_labels = [np.argmax(i) for i in y_predicted]
print(y_predicted_labels[:5])  # first five predicted digits
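To see how many test digits were classified correctly, the argmax predictions can be compared with the labels. A minimal sketch, assuming the variables above are in scope:
# Fraction of test digits whose predicted class matches the true label.
accuracy = np.mean(np.array(y_predicted_labels) == y_test)
print("Test accuracy:", accuracy)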
EXPERIMENT NO.8
Aim - Implement a Long Short-Term Memory (LSTM) Recurrent Neural Network (RNN) using Google Colab.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the training dataset
dataset_train = pd.read_csv('/content/drive/MyDrive/DL_LAB/EXP 7/Google_Stock_Price_Train.csv') # Make sure the path is correct
training_set = dataset_train.iloc[:, 1:2].values
from sklearn.preprocessing import MinMaxScaler
# Scale the training data
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Prepare the data for the LSTM
x_train = []
y_train = []
for i in range(60, len(training_set_scaled)):  # 60-timestep lookback window
    x_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
# Reshape the data for LSTM input
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
# Build the LSTM model
regressor = Sequential()
# Add the first LSTM layer
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# Add the second LSTM layer
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
# Add the third LSTM layer
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
# Add the fourth LSTM layer
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
# Add the output layer
regressor.add(Dense(units=1))
# Compile the model
regressor.compile(optimizer='adam', loss='mean_squared_error')
# Fit the model to the training data
regressor.fit(x_train, y_train, epochs=100, batch_size=32)
# Load the test dataset (the held-out prices to predict)
dataset_test = pd.read_csv('/content/drive/MyDrive/DL_LAB/EXP 7/Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis=0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):  # 20 test days, each with a 60-day lookback
    X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
plt.plot(real_stock_price, color='red', label='Real Google Stock Price')
plt.plot(predicted_stock_price, color='blue', label='Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
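A common numeric companion to the plot is the root-mean-squared error between the real and predicted prices. A minimal sketch, assuming the arrays above are in scope:
import math
from sklearn.metrics import mean_squared_error
# RMSE between real and predicted prices, in the original price units.
n = len(predicted_stock_price)
rmse = math.sqrt(mean_squared_error(real_stock_price[:n], predicted_stock_price))
print(f"RMSE: {rmse:.2f}")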