Deep Learning Programs Updated
PROGRAM – 1
Write a program to build and train a neural network for handwritten digit classification on the MNIST dataset using Keras
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

# Load the MNIST dataset and scale pixel values to [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0

# Build a simple feed-forward classifier
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])

# Compile and train, validating on the test set after each epoch
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)
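To check generalization after training, the model can also be scored on the held-out test set (a small optional addition, not part of the original listing):
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}")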
Output
PROGRAM – 2
Write a program to implement the XOR function using a neural network with one hidden layer, training it with forward and backward propagation in NumPy:
import numpy as np
import matplotlib.pyplot as plt

# XOR inputs and targets
x = np.array([[0,1],[1,0],[1,1],[0,0]])
y = np.array([[1],[1],[0],[0]])

# Network dimensions
num_input = 2
num_hidden = 1
num_output = 1

# Initialize weights and the hidden-layer bias
wxh = np.random.randn(num_input, num_hidden)
bh = np.zeros((1, num_hidden))
why = np.random.randn(num_hidden, num_output)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def sigmoid_derivative(z):
    return sigmoid(z) * (1 - sigmoid(z))

def forward_prop(x, wxh, why):
    z1 = np.dot(x, wxh) + bh
    a1 = sigmoid(z1)
    z2 = np.dot(a1, why)   # linear output layer
    return z1, a1, z2

def cost_function(y, y_hat):
    return 0.5 * np.sum((y - y_hat) ** 2)

def backward_prop(x, y, z1, a1, z2, why):
    m = y.shape[0]
    dl_dz2 = z2 - y                          # gradient of squared error w.r.t. output
    dl_da1 = np.dot(dl_dz2, why.T)
    dl_dz1 = dl_da1 * sigmoid_derivative(z1) # chain rule through the sigmoid
    dj_dwhy = np.dot(a1.T, dl_dz2) / m
    dj_dwxh = np.dot(x.T, dl_dz1) / m
    dj_dbh = np.sum(dl_dz1, axis=0, keepdims=True) / m
    return dj_dwxh, dj_dwhy, dj_dbh

n = 0.01                 # learning rate
num_iterations = 5000
costs = []
for i in range(num_iterations):
    z1, a1, z2 = forward_prop(x, wxh, why)
    y_hat = z2
    cost = cost_function(y, y_hat)
    costs.append(cost)
    dj_dwxh, dj_dwhy, dj_dbh = backward_prop(x, y, z1, a1, z2, why)
    wxh -= n * dj_dwxh
    why -= n * dj_dwhy
    bh -= n * dj_dbh

plt.grid()
plt.plot(range(num_iterations), costs)
plt.title('Cost Function')
plt.xlabel('Training Iterations')
plt.ylabel('Cost')
plt.show()
z1, hidden_layer_activation, output = forward_prop(x, wxh, why)
print(output)
Output:
[[0.51499412]
 [0.55763691]
 [0.61069325]
 [0.35298938]]
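Note that a network with a single hidden neuron cannot represent XOR, which is why the predictions above hover near 0.5 instead of reaching the targets; with num_hidden set to 2 or more, the cost can fall much further.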
PROGRAM 3
Build the image classification model by dividing the model into four stages
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

# Stage 1: Load and normalize the data
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Stage 2: Build the model
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
model.summary()

# Stage 3: Compile the model
model.compile(optimizer="sgd",
              loss="sparse_categorical_crossentropy",
              metrics=['accuracy'])

# Stage 4: Train the model and plot its learning curves
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# You can also use the model to predict on new data using the `model.predict()` method.
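For instance (a small illustration, not part of the original listing), predictions come back as per-class probabilities, so np.argmax recovers the predicted digit:
import numpy as np
probs = model.predict(x_test[:1])          # probabilities for the first test image
print("predicted digit:", np.argmax(probs, axis=1)[0])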
OUTPUT
PROGRAM 4
Write a program to demonstrate convolution and max pooling operations on an image using PyTorch
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
# The original input image and kernel are not shown; random tensors stand
# in here (tensor shapes are (batch, channels, height, width))
image = torch.rand(1, 1, 8, 8)
kernel = torch.rand(1, 1, 3, 3)
# Apply convolution
conv = F.conv2d(image, kernel, stride=1, padding=0)
# Apply 2x2 max pooling to the convolved feature map
pooled = F.max_pool2d(conv, kernel_size=2, stride=2)
# Plotting
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
# Original Image
ax[0].imshow(image.squeeze().numpy(), cmap='gray')
ax[0].set_title('Input Image')
ax[0].axis('off')
# After Convolution
ax[1].imshow(conv.squeeze().detach().numpy(), cmap='gray')
ax[1].set_title('After Convolution')
ax[1].axis('off')
# After Max Pooling
pooled_image = pooled.squeeze(0).squeeze(0)
ax[2].imshow(pooled_image.detach().numpy(), cmap='gray')
ax[2].set_title('After Max Pooling')
ax[2].axis('off')
plt.show()
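With the shapes assumed above (an 8×8 input and a 3×3 kernel with no padding), the convolution output is (8 − 3 + 1) × (8 − 3 + 1) = 6 × 6, and 2×2 max pooling with stride 2 halves that to 3 × 3; in general, a valid convolution of an H × W image with a K × K kernel yields (H − K + 1) × (W − K + 1).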
Output:
PROGRAM 5
Write a program to perform sentiment analysis using an RNN
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
vocab_size = 5000
maxlen = 500
batch_size = 64
epochs = 3

# Load the IMDB reviews and pad/truncate each one to maxlen tokens
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

model = Sequential([
    Embedding(vocab_size, 32, input_length=maxlen),
    LSTM(100),
    Dense(1, activation='sigmoid')
])
model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
print(model.summary())

model.fit(
    x_train, y_train,
    validation_split=0.1,
    batch_size=batch_size,
    epochs=epochs
)

loss, acc = model.evaluate(x_test, y_test)
print(f"loss: {loss}\naccuracy: {acc}")
Output:
PROGRAM 6
Write a program to implement an LSTM-based autoencoder in TensorFlow/Keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
# 4. Train/Test split
TRAIN_RATIO = 0.9
train_size = int(len(df) * TRAIN_RATIO)
train_df = df.iloc[:train_size].copy()
test_df = df.iloc[train_size:].copy()
TIME_STEPS = 30
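# Steps 1-3 (loading df, assumed to be a pandas dataframe with a numeric
# 'close' column, likely standardized given the StandardScaler import) and
# the create_dataset helper are not shown in the original listing. The
# version below is a hypothetical reconstruction (an assumption, not the
# original code): it slices a 1-D series into overlapping windows of
# length time_steps.
def create_dataset(series, time_steps):
    values = series.values
    Xs, ys = [], []
    for i in range(len(values) - time_steps):
        Xs.append(values[i:i + time_steps])   # input window
        ys.append(values[i + time_steps])     # value following the window
    return np.array(Xs).reshape(-1, time_steps, 1), np.array(ys)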
X_train, y_train = create_dataset(train_df['close'], TIME_STEPS)
X_test, y_test = create_dataset(test_df['close'], TIME_STEPS)
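# Steps 5-8 (the autoencoder itself) are also omitted. This is a minimal
# hedged sketch, not the original architecture: an LSTM encoder, a
# RepeatVector bottleneck, and an LSTM decoder trained to reproduce its
# own input windows.
model = keras.Sequential([
    keras.layers.LSTM(64, input_shape=(TIME_STEPS, 1)),
    keras.layers.RepeatVector(TIME_STEPS),
    keras.layers.LSTM(64, return_sequences=True),
    keras.layers.TimeDistributed(keras.layers.Dense(1))
])
model.compile(optimizer='adam', loss='mse')
history = model.fit(X_train, X_train, epochs=10, batch_size=32,
                    validation_split=0.1, shuffle=False)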
# 9. Plot losses
plt.figure(figsize=(8,4))
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'],label='validation loss')
plt.title('LSTM Autoencoder Loss')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend()
plt.show()
X_test_pred = model.predict(X_test, verbose=0)
test_mse = np.mean((X_test_pred - X_test)**2, axis=(1,2))
print("Average test MSE:", test_mse.mean())
PROGRAM 7
Write a program to implement image generation using a GAN
import os
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import (Input, InputLayer, Dense, Reshape, Flatten,
                          LeakyReLU, BatchNormalization, Dropout)
from keras.optimizers import Adam
import matplotlib.pyplot as plt

def build_discriminator(img_shape=(28,28,1)):
    # Binary classifier: real (1) vs. generated (0) images
    seq = Sequential([
        InputLayer(input_shape=img_shape),
        Flatten(),
        Dense(512), LeakyReLU(0.2), Dropout(0.3),
        Dense(256), LeakyReLU(0.2), Dropout(0.3),
        Dense(1, activation='sigmoid')
    ])
    img = Input(shape=img_shape)
    D = Model(img, seq(img))
    D.compile(optimizer=Adam(2e-4, 0.5), loss='binary_crossentropy',
              metrics=['accuracy'])
    return D
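The listing shows only the discriminator; the generator and the combined training model are not included. A minimal sketch of those missing pieces in the same style (an assumption, not the original code) might look like:
def build_generator(latent_dim=100, img_shape=(28,28,1)):
    # Map a latent noise vector to a flattened image, then reshape
    seq = Sequential([
        InputLayer(input_shape=(latent_dim,)),
        Dense(256), LeakyReLU(0.2), BatchNormalization(momentum=0.8),
        Dense(512), LeakyReLU(0.2), BatchNormalization(momentum=0.8),
        Dense(int(np.prod(img_shape)), activation='tanh'),
        Reshape(img_shape)
    ])
    noise = Input(shape=(latent_dim,))
    return Model(noise, seq(noise))

# Combined model: the generator is trained through a frozen discriminator
latent_dim = 100
discriminator = build_discriminator()
generator = build_generator(latent_dim)
discriminator.trainable = False
z = Input(shape=(latent_dim,))
gan = Model(z, discriminator(generator(z)))
gan.compile(optimizer=Adam(2e-4, 0.5), loss='binary_crossentropy')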
OUTPUT