In [ ]:
import numpy as np
# Creating an array
array = np.array([1, 2, 3, 4])
# Perform arithmetic operations
array = array * 2
# Matrix multiplication
matrix1 = np.array([[1, 2], [3, 4]])
matrix2 = np.array([[5, 6], [7, 8]])
result = np.dot(matrix1, matrix2)
result
Out[ ]:
array([[19, 22],
       [43, 50]])
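Element-wise arithmetic also extends to arrays of different shapes through broadcasting. A minimal sketch (the arrays below are chosen purely for illustration):
In [ ]:
# Broadcasting: a (3, 1) column and a (3,) row combine into a (3, 3) grid
col = np.array([[1], [2], [3]])
row = np.array([10, 20, 30])
col + row
Out[ ]:
array([[11, 21, 31],
       [12, 22, 32],
       [13, 23, 33]])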
In [ ]:
import pandas as pd
data = {'Name': ['Alice', 'Bob', 'Charlie'], 'Age': [25, 30, 35]}
df = pd.DataFrame(data)
ages = df['Age']
filtered_df = df[df['Age'] > 25]
filtered_df
Out[ ]:
      Name  Age
1      Bob   30
2  Charlie   35
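Filtering is not the only vectorized operation; a new column can be derived from an existing one in a single expression. A minimal sketch (the column name 'AgeNextYear' is illustrative):
In [ ]:
# Derive a new column from an existing one without an explicit loop
df['AgeNextYear'] = df['Age'] + 1
print(df)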
In [ ]:
import matplotlib.pyplot as plt
x = [1, 2, 3, 4]
y = [10, 20, 25, 30]
plt.plot(x, y)
plt.xlabel('X Axis')
plt.ylabel('Y Axis')
plt.title('Sample Line Plot')
plt.show()
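The same data can also be drawn as individual points rather than a connected line; this sketch reuses x and y from the cell above.
In [ ]:
# Scatter plot of the same data
plt.scatter(x, y, color='red')
plt.xlabel('X Axis')
plt.ylabel('Y Axis')
plt.title('Sample Scatter Plot')
plt.show()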
In [ ]:
from scipy import optimize
def f(x):
    return x**2 + 5*np.sin(x)
result = optimize.minimize(f, x0=0)
print(result)
  message: Optimization terminated successfully.
  success: True
   status: 0
      fun: -3.2463942726915387
        x: [-1.111e+00]
      nit: 5
      jac: [-2.980e-08]
 hess_inv: [[ 1.544e-01]]
     nfev: 12
     njev: 6
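The optimize module also handles scalar root finding. A minimal sketch reusing f from the cell above; the bracket was picked by hand so that f changes sign inside it (f(-3) > 0, f(-1) < 0):
In [ ]:
# Find an x in [-3, -1] where f(x) = 0
root = optimize.root_scalar(f, bracket=[-3, -1])
print(root.root)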
In [ ]:
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Hold out 30% of the data for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Fit a logistic regression classifier and evaluate it on the held-out data
model = LogisticRegression(max_iter=200)  # raise the iteration limit so lbfgs converges
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
Accuracy: 0.9333333333333333
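As a shortcut, a fitted scikit-learn classifier can report the same test accuracy directly through its score method.
In [ ]:
# Equivalent to accuracy_score(y_test, model.predict(X_test))
print("Accuracy:", model.score(X_test, y_test))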
In [ ]:
import torch
a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])
result = a + b
print(result)
tensor([5., 7., 9.])
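Beyond element-wise arithmetic, tensors can track gradients. A minimal autograd sketch (the function y = x**2 + 3x is chosen only for illustration):
In [ ]:
# requires_grad=True tells PyTorch to record operations for differentiation
x = torch.tensor(2.0, requires_grad=True)
y = x ** 2 + 3 * x
y.backward()        # compute dy/dx
print(x.grad)       # dy/dx = 2x + 3 = 7 at x = 2
tensor(7.)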
In [ ]:
import numpy as np
# Creating an array from a list
arr = np.array([1, 2, 3, 4])
print(arr)
[1 2 3 4]
In [ ]:
zeros_arr = np.zeros((3, 3))
print(zeros_arr)
[[0. 0. 0.]
 [0. 0. 0.]
 [0. 0. 0.]]
In [ ]:
ones_arr = np.ones((2, 2))
print(ones_arr)
[[1. 1.]
 [1. 1.]]
In [ ]:
arr = np.arange(0, 10, 2)
print(arr)
[0 2 4 6 8]
In [ ]:
arr = np.linspace(0, 1, 5)
print(arr)
[0. 0.25 0.5 0.75 1. ]
In [ ]:
arr = np.array([1, 2, 3, 4, 5, 6])
reshaped_arr = arr.reshape((2, 3))
print(reshaped_arr)
[[1 2 3]
 [4 5 6]]
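Passing -1 lets NumPy infer one dimension from the array's size, so only one dimension has to be hard-coded.
In [ ]:
inferred_arr = arr.reshape((3, -1))  # second dimension inferred as 2
print(inferred_arr)
[[1 2]
 [3 4]
 [5 6]]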
In [ ]:
arr = np.array([[1, 2], [3, 4]])
transposed_arr = np.transpose(arr)
print(transposed_arr)
[[1 3]
 [2 4]]
In [ ]:
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
concatenated_arr = np.concatenate((arr1, arr2))
print(concatenated_arr)
[1 2 3 4 5 6]
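Closely related to concatenation, np.vstack stacks the same 1-D arrays as rows of a 2-D array.
In [ ]:
stacked_arr = np.vstack((arr1, arr2))
print(stacked_arr)
[[1 2 3]
 [4 5 6]]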
In [ ]:
arr = np.array([[1, 2, 3], [4, 5, 6]])
sum_arr = np.sum(arr)
print(sum_arr)
21
In [ ]:
arr = np.array([1, 2, 3, 4, 5])
mean_val = np.mean(arr)
print(mean_val)
3.0
In [ ]:
arr = np.array([1, 2, 3, 4, 5])
max_val = np.max(arr)
min_val = np.min(arr)
print(f"Max: {max_val}, Min: {min_val}")
Max: 5, Min: 1
In [ ]:
arr = np.array([1, 4, 9, 16])
sqrt_arr = np.sqrt(arr)
print(sqrt_arr)
[1. 2. 3. 4.]
In [ ]:
arr1 = np.array([1, 2])
arr2 = np.array([3, 4])
dot_product = np.dot(arr1, arr2)
print(dot_product)
11
In [ ]:
arr = np.array([1, 2, 3, 4, 5])
std_val = np.std(arr)
print(std_val)
1.4142135623730951
In [ ]:
var_val = np.var(arr)
print(var_val)
2.0
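Note that np.std and np.var use the population formulas by default (ddof=0); passing ddof=1 gives the sample versions.
In [ ]:
# Sample variance divides by n - 1 instead of n
print(np.var(arr, ddof=1))
2.5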
In [ ]:
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
correlation = np.corrcoef(arr1, arr2)
print(correlation)
[[1. 1.]
 [1. 1.]]
In [ ]:
import pandas as pd
series = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
print(series)
a    1
b    2
c    3
d    4
dtype: int64
In [ ]:
data = {'Name': ['Alice', 'Bob', 'Charlie'], 'Age': [25, 30, 35]}
df = pd.DataFrame(data)
print(df)
      Name  Age
0    Alice   25
1      Bob   30
2  Charlie   35
In [ ]:
print(df.loc[0])
Name    Alice
Age        25
Name: 0, dtype: object
In [ ]:
print(df.iloc[1])
Name    Bob
Age      30
Name: 1, dtype: object
In [ ]:
print(df.at[0, 'Name'])
Alice
In [ ]:
print(df.head(2))
    Name  Age
0  Alice   25
1    Bob   30
In [ ]:
print(df.tail(1))
      Name  Age
2  Charlie   35
In [ ]:
df = df.drop(columns=['Age'])
print(df)
      Name
0    Alice
1      Bob
2  Charlie
In [ ]:
df = df.rename(columns={'Name': 'Full Name'})
print(df)
  Full Name
0     Alice
1       Bob
2   Charlie
In [ ]:
df = pd.DataFrame({'Name': ['Alice', 'Bob', None], 'Age': [25, None, 35]})
print(df.isnull())
    Name    Age
0  False  False
1  False   True
2   True  False
In [ ]:
df_filled = df.fillna({'Name': 'Unknown', 'Age': 0})
print(df_filled)
      Name   Age
0    Alice  25.0
1      Bob   0.0
2  Unknown  35.0
In [ ]:
df_clean = df.dropna()
print(df_clean)
    Name   Age
0  Alice  25.0
In [ ]:
data = {'Name': ['Alice', 'Bob', 'Charlie', 'Alice'], 'Age': [25, 30, 35, 28]}
df = pd.DataFrame(data)
grouped = df.groupby('Name').mean()
print(grouped)
          Age
Name         
Alice    26.5
Bob      30.0
Charlie  35.0
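groupby can also return several summaries per group in one call via .agg; the statistics chosen below are illustrative.
In [ ]:
print(df.groupby('Name')['Age'].agg(['mean', 'count']))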
In [ ]:
df = pd.DataFrame({'Age': [25, 30, 35], 'Score': [85, 90, 95]})
mean_vals = df.mean()
print(mean_vals)
Age      30.0
Score    90.0
dtype: float64
In [ ]:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import seaborn as sns
import matplotlib.pyplot as plt
In [ ]:
from sklearn.datasets import load_iris
# Load the iris dataset
iris = load_iris()
X = iris.data # Features
y = iris.target # Target (species)
# Convert it into a DataFrame for better visualization
iris_df = pd.DataFrame(X, columns=iris.feature_names)
iris_df['species'] = y
print(iris_df.head())
   sepal length (cm)  sepal width (cm)  petal length (cm)  petal width (cm)  \
0                5.1               3.5                1.4               0.2
1                4.9               3.0                1.4               0.2
2                4.7               3.2                1.3               0.2
3                4.6               3.1                1.5               0.2
4                5.0               3.6                1.4               0.2

   species
0        0
1        0
2        0
3        0
4        0
In [ ]:
# Split the data into 80% training and 20% testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
In [ ]:
# Create a KNN classifier with K=3
knn = KNeighborsClassifier(n_neighbors=3)
# Train the model using the training data
knn.fit(X_train, y_train)
Out[ ]:
KNeighborsClassifier(n_neighbors=3)
In [ ]:
# Predict the target values (species) for the test set
y_pred = knn.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy * 100:.2f}%")
# Confusion Matrix
conf_matrix = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:")
print(conf_matrix)
# Classification Report
print("Classification Report:")
print(classification_report(y_test, y_pred))
Accuracy: 100.00%
Confusion Matrix:
[[10  0  0]
 [ 0  9  0]
 [ 0  0 11]]
Classification Report:
              precision    recall  f1-score   support

           0       1.00      1.00      1.00        10
           1       1.00      1.00      1.00         9
           2       1.00      1.00      1.00        11

    accuracy                           1.00        30
   macro avg       1.00      1.00      1.00        30
weighted avg       1.00      1.00      1.00        30
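A single train/test split can be optimistic on a dataset this small; a minimal cross-validation sketch (5 folds chosen arbitrarily) gives a more stable estimate.
In [ ]:
from sklearn.model_selection import cross_val_score
# 5-fold cross-validation on the full dataset with the same K
cv_scores = cross_val_score(KNeighborsClassifier(n_neighbors=3), X, y, cv=5)
print("Mean CV accuracy:", cv_scores.mean())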
In [ ]:
# Plot the confusion matrix
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=iris.target_names, yticklabels=iris.target_names)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix of KNN Model')
plt.show()
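To see how sensitive the model is to the choice of K, one can refit for a few values and compare test accuracy (the range of K values below is illustrative).
In [ ]:
for k in [1, 3, 5, 7, 9]:
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(X_train, y_train)
    print(f"K={k}: accuracy={accuracy_score(y_test, clf.predict(X_test)):.2f}")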