SC Assignment Q2
SC Assignment Q2
In [2]:
#Reading data
data=pd.read_csv('Iris.csv', index_col=False)
data.describe()
In [3]:
print("\nInfo of the data:\n")
data.info()
<class 'pandas.core.frame.DataFrame'>
In [4]:
print("\n10 first samples of the dataset:")
data.head(10)
In [5]:
print("\n10 last samples of the dataset:")
data.tail(10)
In [6]:
sns.lmplot(x ='SepalLengthCm', y ='SepalWidthCm',
data=data,
fit_reg=False,
hue="Species",
scatter_kws={"marker": "D",
"s": 50})
plt.title('SepalLength vs SepalWidth')
sns.lmplot(x='PetalLengthCm', y='PetalWidthCm',
data=data,
fit_reg=False,
hue="Species",
scatter_kws={"marker": "D",
"s": 50})
plt.title('PetalLength vs PetalWidth')
sns.lmplot(x='SepalLengthCm', y='PetalLengthCm',
data=data,
fit_reg=False,
hue="Species",
scatter_kws={"marker": "D",
"s": 50})
plt.title('SepalLength vs PetalLength')
sns.lmplot(x='SepalWidthCm', y='PetalWidthCm',
data=data,
fit_reg=False,
hue="Species",
scatter_kws={"marker": "D",
"s": 50},)
plt.title('SepalWidth vs PetalWidth')
plt.show()
In [7]:
print(data["Species"].unique())
In [8]:
data.loc[data["Species"]=="Iris-setosa","Species"]=0
data.loc[data["Species"]=="Iris-versicolor","Species"]=1
data.loc[data["Species"]=="Iris-virginica","Species"]=2
data.head()
In [9]:
data=data.iloc[np.random.permutation(len(data))]
print(data.head())
In [10]:
X=data.iloc[:,1:5].values
y=data.iloc[:,5].values
print("Shape of X",X.shape)
print("Shape of y",y.shape)
print("Examples of X\n",X[:3])
print("Examples of y\n",y[:3])
Shape of X (150, 4)
Shape of y (150,)
Examples of X
Examples of y
[0 0 0]
In [11]:
X_normalized=normalize(X,axis=0)
print("Examples of X_normalised\n",X_normalized[:3])
Examples of X_normalised
In [12]:
#Creating train,test and validation data
'''
'''
total_length=len(data)
train_length=int(0.8*total_length)
test_length=int(0.2*total_length)
X_train=X_normalized[:train_length]
X_test=X_normalized[train_length:]
y_train=y[:train_length]
y_test=y[train_length:]
In [14]:
#Change the label to one hot vector
'''
[0]--->[1 0 0]
[1]--->[0 1 0]
[2]--->[0 0 1]
'''
y_train=np_utils.to_categorical(y_train,num_classes=3)
y_test=np_utils.to_categorical(y_test,num_classes=3)
print("Shape of y_train",y_train.shape)
print("Shape of y_test",y_test.shape)
In [15]:
model=Sequential()
model.add(Dense(1000,input_dim=4,activation='relu'))
model.add(Dense(500,activation='relu'))
model.add(Dense(300,activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(3,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
In [16]:
model.summary()
Model: "sequential"
_________________________________________________________________
=================================================================
_________________________________________________________________
_________________________________________________________________
_________________________________________________________________
_________________________________________________________________
=================================================================
Non-trainable params: 0
_________________________________________________________________
In [19]:
model.fit(X_train,y_train,validation_data=(X_test,y_test),batch_size=20,epochs=10,ve
Epoch 1/10
Epoch 2/10
Epoch 3/10
Epoch 4/10
Epoch 5/10
Epoch 6/10
Epoch 7/10
Epoch 8/10
Epoch 9/10
Epoch 10/10
<keras.callbacks.History at 0x1f4bfc61bb0>
Out[19]:
In [20]:
prediction=model.predict(X_test)
length=len(prediction)
y_label=np.argmax(y_test,axis=1)
predict_label=np.argmax(prediction,axis=1)
accuracy=np.sum(y_label==predict_label)/length * 100
An accuracy of 100% is achieved in this dataset.It can be asserted that for each
epoch, the neural network is trying to learn from its existing feature and
predict it by its weights and biases. For each epoch, the weights and biases
and changed by subtracting its rate to get a better accuracy each time.
In [ ]: