Hemanth SSR (plushycat)

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_breast_cancer
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, classification_report
# Load and preprocess data
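# The gist preview ends here; a minimal sketch of the likely continuation
# (scaling, k=2 clustering, and the PCA scatter plot are assumptions, not from the original):
X, y = load_breast_cancer(return_X_y=True)
X_scaled = StandardScaler().fit_transform(X)

# Cluster into two groups and compare cluster assignments with the true labels
kmeans = KMeans(n_clusters=2, random_state=42, n_init=10)
clusters = kmeans.fit_predict(X_scaled)
print(confusion_matrix(y, clusters))
print(classification_report(y, clusters))

# Project to 2-D with PCA to visualize the clusters
X_pca = PCA(n_components=2).fit_transform(X_scaled)
sns.scatterplot(x=X_pca[:, 0], y=X_pca[:, 1], hue=clusters, palette='viridis', s=20)
plt.title('K-Means clusters (PCA projection)')
plt.show()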
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# Load and split data
X, y = fetch_olivetti_faces(shuffle=True, random_state=42, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
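# The preview cuts off after the split; a sketch of the likely training and
# evaluation steps (the cv=5 choice below is an assumption):
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

# Cross-validated accuracy over the whole dataset
scores = cross_val_score(model, X, y, cv=5)
print("Cross-validation accuracy:", scores.mean())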
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import accuracy_score
# Load and split data
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
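# The preview cuts off after the split; a sketch of the likely continuation
# (the unrestricted tree depth and figure size are assumptions):
clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)
print("Accuracy:", accuracy_score(y_test, clf.predict(X_test)))

# Visualize the fitted tree
plt.figure(figsize=(16, 8))
plot_tree(clf, filled=True, feature_names=load_breast_cancer().feature_names)
plt.show()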
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error, r2_score
def evaluate_and_plot(X_test, y_test, y_pred, xlabel, ylabel, title, is_poly=False):
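    # The preview ends at this signature; the body below is an assumed sketch
    # (it presumes a single-feature X_test of shape (n, 1)):
    print(f"{title}: MSE = {mean_squared_error(y_test, y_pred):.3f}, "
          f"R2 = {r2_score(y_test, y_pred):.3f}")
    plt.scatter(X_test[:, 0], y_test, s=10, label="Actual")
    if is_poly:
        # Sort by x so the fitted polynomial is drawn as a smooth curve
        order = np.argsort(X_test[:, 0])
        plt.plot(X_test[order, 0], np.asarray(y_pred)[order], color="red", label="Predicted")
    else:
        plt.plot(X_test[:, 0], y_pred, color="red", label="Predicted")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend()
    plt.show()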
import numpy as np
import matplotlib.pyplot as plt
def locally_weighted_regression(x, X, y, tau):
    # Gaussian kernel weights centered on the query point x
    w = np.exp(-np.sum((X - x)**2, axis=1) / (2 * tau**2))
    W = np.diag(w)
    # Solve the weighted least-squares normal equation (pinv for numerical stability)
    theta = np.linalg.pinv(X.T @ W @ X) @ X.T @ W @ y
    return x @ theta
# Data
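# The preview ends at "# Data"; the synthetic dataset and prediction loop
# below are assumptions, not from the original gist.
np.random.seed(42)
x_raw = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x_raw) + 0.1 * np.random.randn(100)
X = np.c_[np.ones(len(x_raw)), x_raw]   # prepend a bias column

tau = 0.5
y_pred = np.array([locally_weighted_regression(xq, X, y, tau) for xq in X])

plt.scatter(x_raw, y, s=10, label="Data")
plt.plot(x_raw, y_pred, color="red", label=f"LWR (tau={tau})")
plt.legend()
plt.show()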
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
# Generate data
np.random.seed(42) # Added for reproducibility
data = np.random.rand(40)
train_data, test_data = data[:20], data[20:]
train_labels = ["Class1" if x <= 0.5 else "Class2" for x in train_data]
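# The preview cuts off here; the k-NN classifier below is an assumed sketch
# (the k values and the absolute-distance rule are not from the original gist).
def knn_classify(x, k):
    distances = np.abs(train_data - x)       # 1-D distance to each training point
    nearest = np.argsort(distances)[:k]      # indices of the k closest points
    votes = Counter(train_labels[i] for i in nearest)
    return votes.most_common(1)[0][0]        # majority label among the neighbors

for k in (1, 3, 5):
    preds = [knn_classify(x, k) for x in test_data]
    print(f"k={k}:", preds)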
import pandas as pd
def find_s_algorithm(file_path):
    data = pd.read_csv(file_path)
    print("Training data:")
    print(data)
    attributes = data.columns[:-1]
    class_label = data.columns[-1]
    hypothesis = ['?' for _ in attributes]   # placeholder until the first positive example
    first_positive = True                    # tracks whether a positive example has been seen
    for index, row in data.iterrows():
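        # The preview ends mid-loop; the Find-S update below is an assumed sketch
        # (it presumes positive examples are labeled 'Yes' in the last column).
        if row[class_label] == 'Yes':
            if first_positive:
                # Initialize the hypothesis from the first positive example
                hypothesis = [row[attr] for attr in attributes]
                first_positive = False
            else:
                # Generalize any attribute that disagrees with this positive example
                for i, attr in enumerate(attributes):
                    if hypothesis[i] != row[attr]:
                        hypothesis[i] = '?'
    print("Final hypothesis:", hypothesis)
    return hypothesis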
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# Load data and perform PCA
iris = load_iris()
data_reduced = PCA(n_components=2).fit_transform(iris.data)
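# The preview ends after the projection; the scatter plot below is an assumed sketch.
plt.figure(figsize=(8, 6))
for label, name in enumerate(iris.target_names):
    mask = iris.target == label
    plt.scatter(data_reduced[mask, 0], data_reduced[mask, 1], label=name)
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.title('PCA of the Iris dataset (2 components)')
plt.legend()
plt.show()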
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_california_housing
# Step 1: Load the California Housing Dataset
california_data = fetch_california_housing(as_frame=True)
data = california_data.frame
# Step 2: Compute the correlation matrix
correlation_matrix = data.corr()
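# Step 3 (assumed sketch; the preview ends above): visualize the correlation matrix
plt.figure(figsize=(10, 8))
sns.heatmap(correlation_matrix, annot=True, fmt=".2f", cmap="coolwarm")
plt.title("Correlation Matrix of California Housing Features")
plt.show()

# Step 4 (assumed): pair plot of selected features
sns.pairplot(data[["MedInc", "HouseAge", "AveRooms", "MedHouseVal"]])
plt.show()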
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_california_housing
# Load dataset and select numerical features
housing_df = fetch_california_housing(as_frame=True).frame
numerical_features = housing_df.select_dtypes(include=[np.number]).columns
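# The preview ends here; per-feature histograms and box plots are an assumed sketch.
for feature in numerical_features:
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    sns.histplot(housing_df[feature], kde=True, ax=axes[0])
    axes[0].set_title(f"Distribution of {feature}")
    sns.boxplot(x=housing_df[feature], ax=axes[1])
    axes[1].set_title(f"Box plot of {feature}")
    plt.tight_layout()
    plt.show()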