
Commit

fix: removed answers
agherasie committed Dec 24, 2023
1 parent 7002d6e commit b91a4b9
Showing 2 changed files with 35 additions and 52 deletions.
59 changes: 25 additions & 34 deletions AI/Day2/linear-regression.ipynb
@@ -66,21 +66,21 @@
"metadata": {},
"outputs": [],
"source": [
"# ###\n",
"# # There's clearly a pattern in Brad's data, use simple math to solve his problem.\n",
"# # The goal of this exercise is to make sure you remember what you learned yesterday.\n",
"# # It goes without saying that you must use some of the libs from yesterday in order to solve this exercise !\n",
"# ###\n",
"###\n",
"# There's clearly a pattern in Brad's data, use simple math to solve his problem.\n",
"# The goal of this exercise is to make sure you remember what you learned yesterday.\n",
"# It goes without saying that you must use some of the libs from yesterday in order to solve this exercise !\n",
"###\n",
"\n",
"# def plot_prices(x: np.array, y: np.array):\n",
"# \"\"\"This function should plot house the given house prices\"\"\"\n",
"# pass\n",
"def plot_prices(x: np.array, y: np.array):\n",
" \"\"\"This function should plot house the given house prices\"\"\"\n",
" pass\n",
"\n",
"# def get_data(filepath = \"./data/houses.csv\") -> Tuple[np.array, np.array]:\n",
"# \"\"\"This function should retrieve the data from a csv file and transform it into a numpy array\"\"\"\n",
"# pass\n",
"def get_data(filepath = \"./data/houses.csv\") -> Tuple[np.array, np.array]:\n",
" \"\"\"This function should retrieve the data from a csv file and transform it into a numpy array\"\"\"\n",
" pass\n",
"\n",
"# plot_prices(*get_data())"
"plot_prices(*get_data())"
]
},
{
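
For reference, a minimal sketch of what this stubbed cell asks for, assuming houses.csv holds two comma-separated columns (a feature such as surface area, then the price; the actual column layout and header are not shown in this diff):

    import numpy as np
    import matplotlib.pyplot as plt
    from typing import Tuple

    def plot_prices(x: np.ndarray, y: np.ndarray):
        """Plot the given house prices as a scatter plot."""
        plt.scatter(x, y)
        plt.xlabel("feature")
        plt.ylabel("price")
        plt.show()

    def get_data(filepath="./data/houses.csv") -> Tuple[np.ndarray, np.ndarray]:
        """Read the CSV and split it into two numpy arrays."""
        data = np.genfromtxt(filepath, delimiter=",", skip_header=1)  # skip_header=1 assumes a header row
        return data[:, 0], data[:, 1]

    plot_prices(*get_data())
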
@@ -140,7 +140,7 @@
"def forward(x):\n",
" \"\"\"This method should take a number as input and return a prediction by applying the linear regression formula\"\"\"\n",
" ## apply linear regression to x and return the result\n",
" return w * x + b\n",
" pass\n",
"\n",
"assert forward(15) == 18.6, \"The forward method is incorrectly implemented\""
]
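
The answer removed here is the linear regression formula itself; as a sketch (w and b are globals defined earlier in the notebook, e.g. w = 1.2 and b = 0.6 would satisfy the assert, since 1.2 * 15 + 0.6 = 18.6):

    def forward(x):
        """Take a number as input and return a prediction via the linear regression formula."""
        return w * x + b  # the line this commit replaces with `pass`
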
@@ -219,13 +219,7 @@
" dl_db = 0.0\n",
"\n",
" ## Compute and return the partial derivatives\n",
" for i in range(N):\n",
" dl_dw += -2 * x[i] * (y[i] - (w * x[i] + b))\n",
" dl_db += -2 * (y[i] - (w * x[i] + b))\n",
" dl_dw *= 1/N\n",
" dl_db *= 1/N\n",
"\n",
" return dl_dw, dl_db\n",
" pass\n",
"\n",
"dl_dw, dl_db = gradient_descent([10, 20, 30, 40, 50], [20, 40, 60, 80, 100])\n",
"assert (dl_dw, dl_db) == (-4400.0, -120.0), \"The answer is probably incorrect !\""
@@ -247,8 +241,6 @@
"def optimizer(w: float, b: float, dl_dw: float, dl_db: float, alpha: float) -> Tuple[float, float]:\n",
" ## Update w and b\n",
" ##\n",
" w = w - alpha * dl_dw\n",
" b = b - alpha * dl_db\n",
" return w, b\n",
"\n",
"assert optimizer(w, b, dl_dw, dl_db, alpha=5e-4) == (2.2, 0.06), \"Not correct\""
@@ -289,7 +281,7 @@
"outputs": [],
"source": [
"####################### Data #########################\n",
"x_values = np.linspace(100, 500, 100) # Choose the amount of samples of data you wish to train on\n",
"x_values = None # Choose the amount of samples of data you wish to train on\n",
"x_train = np.array(x_values, dtype=np.float32)\n",
"y_values = [100*i + 1 for i in x_values]\n",
"y_train = np.array(y_values, dtype=np.float32)\n",
@@ -303,8 +295,8 @@
"w = 0.0\n",
"b = 0.0\n",
"################## Hyperparameters ##################\n",
"EPOCH = 100 # Choose the amount of times you wish to iterate on the training data\n",
"LR = 5e-4 # Choose how quick your neural network will learn (faster != better)\n",
"EPOCH = None # Choose the amount of times you wish to iterate on the training data\n",
"LR = None # Choose how quick your neural network will learn (faster != better)\n",
"#####################################################"
]
},
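
For reference, the removed hyperparameter values:

    EPOCH = 100  # removed answer: 100 passes over the training data
    LR = 5e-4    # removed answer: matches the alpha used in the optimizer test above
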
@@ -359,25 +351,24 @@
"\n",
" for x, y in zip(x_train, y_train):\n",
" # Prediction\n",
" pred = w * x + b\n",
" pred = max(0, pred)\n",
" pred = None\n",
"\n",
" # Gradient descent\n",
" dl_dw += -2 * x * (y - pred)\n",
" dl_db += -2 * (y - pred)\n",
" dl_dw += None\n",
" dl_db += None\n",
"\n",
" # Getting the average values\n",
" dl_dw *= 1/N\n",
" dl_db *= 1/N\n",
" dl_dw *= None\n",
" dl_db *= None\n",
"\n",
" # Optimization\n",
" w = w - LR * dl_dw\n",
" b = b - LR * dl_db\n",
" w = None\n",
" b = None\n",
"\n",
" # Logging loss\n",
" total_error = 0.0\n",
" for i in range(N):\n",
" total_error += (y_train[i] - (w * x_train[i] + b)) ** 2\n",
" total_error += None\n",
" loss_history.append(total_error / N)\n",
"\n",
"\n",
28 changes: 10 additions & 18 deletions AI/Day2/logistic-regression.ipynb
@@ -188,8 +188,7 @@
"source": [
"class MyLogisticRegression:\n",
" def __init__(self, max_iter=EPOCH, lr=LR):\n",
" self.max_iter = max_iter\n",
" self.lr = lr\n",
" pass\n",
"\n",
"assert MyLogisticRegression().max_iter == EPOCH, \"we can't find max_iter inside your class\"\n",
"assert MyLogisticRegression().lr == LR, \"we can't find lr inside your class\""
@@ -221,8 +220,8 @@
" ## Keep in mind that we have only one layer\n",
" ## While both of these values must be initialised as 0, they won't be initialised the same way\n",
" ## Try to remember the properties and use of both of these values if you can't remember why\n",
" self.w = np.zeros(x.shape[1])\n",
" self.b = 0\n",
" self.w = None\n",
" self.b = None\n",
"\n",
" assert np.mean(self.w) == 0, \"w should be initialised to 0\"\n",
" assert self.b == 0, \"b should be initialised to 0\"\n",
@@ -295,8 +294,7 @@
" \"\"\"\n",
" The predict method returns its predictions for the given input data.\n",
" \"\"\"\n",
" y_pred = self._forward(x)\n",
" return np.array([1 if p > 0.5 else 0 for p in y_pred])\n",
" pass\n",
"\n",
"MyLogisticRegression.predict = predict"
]
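
The removed predict thresholds the sigmoid outputs at 0.5 to produce hard class labels:

    def predict(self, x):
        """Return 0/1 predictions for the given input data."""
        y_pred = self._forward(x)
        return np.array([1 if p > 0.5 else 0 for p in y_pred])
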
@@ -322,16 +320,15 @@
" \"\"\"\n",
" The linear transformation applies the layer's weights and biases to the input data.\n",
" \"\"\"\n",
" return self.w @ x.T + self.b\n",
" pass\n",
"\n",
"MyLogisticRegression._linear = linear\n",
"\n",
"def optimize(self, dw: np.ndarray, db: np.ndarray):\n",
" \"\"\"\n",
" The optimize method performs the gradient descent update to the weights and biases.\n",
" \"\"\"\n",
" self.w -= dw * self.lr\n",
" self.b -= db * self.lr\n",
" pass\n",
"\n",
"MyLogisticRegression._optimize = optimize\n",
"\n",
@@ -382,8 +379,7 @@
" \"\"\"\n",
" The sigmoid function squashes the input data between 0 and 1.\n",
" \"\"\"\n",
" z = np.exp(-x)\n",
" return 1 / (1 + z)\n",
" pass\n",
" \n",
"MyLogisticRegression._sigmoid = sigmoid\n",
"\n",
@@ -416,8 +412,7 @@
" \"\"\"\n",
" The forward method passes the input data through the model's transformations.\n",
" \"\"\"\n",
" y_pred = self._linear(x)\n",
" return np.array([self._sigmoid(val) for val in y_pred])\n",
" pass\n",
"\n",
"MyLogisticRegression._forward = forward\n",
"## Testing ##\n",
@@ -500,7 +495,7 @@
" \"\"\"\n",
" Binary Cross Entropy is a method of calculating the model's loss.\n",
" \"\"\"\n",
" return -np.mean(y * np.log(y_pred + 1e-9) + (1 - y) * np.log(1 - y_pred + 1e-9))\n",
" pass\n",
"\n",
"MyLogisticRegression._bce = bce"
]
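
The removed body is the usual binary cross-entropy, -mean(y * log(y_pred) + (1 - y) * log(1 - y_pred)), with a small epsilon guarding against log(0):

    def bce(self, y, y_pred):
        """Binary cross-entropy loss; the 1e-9 terms avoid log(0)."""
        return -np.mean(y * np.log(y_pred + 1e-9) + (1 - y) * np.log(1 - y_pred + 1e-9))
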
@@ -542,10 +537,7 @@
" \"\"\"\n",
" The backward method calculates the gradients for the weights and biases.\n",
" \"\"\"\n",
" y_pred = self._forward(x)\n",
" db = np.mean(y_pred - y)\n",
" dw = np.array([np.mean(grad) for grad in x.T @ (y_pred - y)])\n",
" return dw, db\n",
" pass\n",
"\n",
"MyLogisticRegression._backward = backward"
]
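
The removed backward derives the gradients from the prediction error. Note one quirk: np.mean over each scalar entry of x.T @ (y_pred - y) is a no-op, so dw is the per-feature gradient summed over samples, while db is averaged:

    def backward(self, x, y):
        """Gradients of the loss w.r.t. the weights and bias."""
        y_pred = self._forward(x)
        db = np.mean(y_pred - y)
        dw = np.array([np.mean(grad) for grad in x.T @ (y_pred - y)])
        return dw, db
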
