Merge pull request #536 from CoderOMaster/main
Vulnerable Cancer Patients
Showing 7 changed files with 13,185 additions and 0 deletions.
10,393 changes: 10,393 additions & 0 deletions
Vulnerable Cancer Patient Analysis/Dataset/Mental Health Dataset.csv
Large diffs are not rendered by default.
2,384 changes: 2,384 additions & 0 deletions
Vulnerable Cancer Patient Analysis/Model/EDA+ROBERTA.ipynb
Large diffs are not rendered by default.
351 changes: 351 additions & 0 deletions
Vulnerable Cancer Patient Analysis/Model/LSTM_GRU.ipynb
@@ -0,0 +1,351 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "v7KQqli6SXxZ"
},
"outputs": [],
"source": [
"import numpy as np\n", | ||
"import pandas as pd\n", | ||
"import tensorflow as tf\n", | ||
"import re\n", | ||
"from tensorflow.keras.preprocessing.text import one_hot\n", | ||
"import matplotlib.pyplot as py\n", | ||
"from tensorflow.keras.preprocessing.text import Tokenizer\n", | ||
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n", | ||
"from tensorflow.keras.models import Sequential,load_model\n", | ||
"from tensorflow.keras.layers import Embedding,LSTM,Dense,Dropout\n", | ||
"from sklearn.model_selection import train_test_split\n", | ||
"from tensorflow.keras.layers import Flatten\n", | ||
"from tensorflow.keras.layers import Embedding" | ||
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "UXGmVN1VSbBN"
},
"outputs": [],
"source": [
"df=pd.read_csv(\"Mental Health Dataset.csv\")\n",
"df.posts=df.posts.astype(str)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "rPl1uMVXUPSx",
"outputId": "f6eb9b04-02cb-4b89-b4da-3dc98dad6436"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" posts predicted \\\n",
"10387 hey everyone I am a 25 year old male I work ou... negative \n",
"10388 have surgery for stage 1 colon cancer 1 year a... very negative \n",
"10389 the doctor advise we he could not remove the a... neutral \n",
"10390 my 66 year old father have been through so muc... neutral \n",
"10391 I have bein have a bloody stool since last yea... negative \n",
"\n",
" intensity \n",
"10387 -1 \n",
"10388 -2 \n",
"10389 0 \n",
"10390 0 \n",
"10391 -1 \n"
]
}
],
"source": [
"df.isnull().sum()\n",
"print(df.tail())"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"id": "F4C1iyuEUVNK"
},
"outputs": [],
"source": [
"def clean_text(text):\n", | ||
" text=text.lower()\n", | ||
" text=re.sub(r'[^a-zA-Z0-9\\s]','',text)\n", | ||
" return text" | ||
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "PgotztcoUW-t"
},
"outputs": [],
"source": [
"df['posts']=df['posts'].apply(clean_text)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"id": "8--MaQ6nUXE9"
},
"outputs": [],
"source": [
"from tensorflow.keras.preprocessing.text import Tokenizer\n",
"from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
"\n",
"# Assuming df.posts is your text data\n",
"vocab_size=500\n",
"max_length = 200\n",
"embed_vector_size=200\n",
"tokenizer = Tokenizer(num_words=vocab_size, oov_token='<OOV>')\n",
"tokenizer.fit_on_texts(df.posts)\n",
"\n",
"# Convert text to sequences\n",
"encoded_reviews = tokenizer.texts_to_sequences(df.posts)\n",
"\n",
"# Pad sequences\n",
"padded_reviews = pad_sequences(encoded_reviews, maxlen=max_length, padding='post')\n"
]
},
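{
"cell_type": "markdown",
"metadata": {},
"source": [
"Quick sanity check on the preprocessing above: a minimal sketch (the sample post is made up) of how the fitted `tokenizer` and `pad_sequences` turn one cleaned post into the fixed-length integer vector the models consume. Words outside the 500-word vocabulary map to the `<OOV>` index."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sample post, run through the same clean_text as the dataset\n",
"sample = clean_text(\"the scan results came back and we are waiting for the doctor\")\n",
"seq = tokenizer.texts_to_sequences([sample])\n",
"padded = pad_sequences(seq, maxlen=max_length, padding='post')\n",
"print(padded.shape)    # (1, 200): one post, padded/truncated to max_length\n",
"print(padded[0][:15])  # leading token ids; zeros begin where padding starts"
]
},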
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3BOM7QOiUc-1",
"outputId": "9dbbf3c5-3131-4c94-e337-84694392c8e0"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/5\n",
"260/260 [==============================] - 13s 39ms/step - loss: 0.9201 - accuracy: 0.6200 - val_loss: 0.7941 - val_accuracy: 0.6720\n",
"Epoch 2/5\n",
"260/260 [==============================] - 3s 10ms/step - loss: 0.5993 - accuracy: 0.7702 - val_loss: 0.7475 - val_accuracy: 0.6965\n",
"Epoch 3/5\n",
"260/260 [==============================] - 2s 8ms/step - loss: 0.3824 - accuracy: 0.8818 - val_loss: 0.7725 - val_accuracy: 0.6917\n",
"Epoch 4/5\n",
"260/260 [==============================] - 1s 5ms/step - loss: 0.2382 - accuracy: 0.9364 - val_loss: 0.8379 - val_accuracy: 0.6797\n",
"Epoch 5/5\n",
"260/260 [==============================] - 1s 6ms/step - loss: 0.1588 - accuracy: 0.9651 - val_loss: 0.8689 - val_accuracy: 0.6768\n",
"65/65 [==============================] - 0s 4ms/step - loss: 0.8689 - accuracy: 0.6768\n",
"Accuracy on Test Set: 0.6767676472663879\n"
]
}
],
"source": [
"#ANN BASED MODEL IS HERE\n", | ||
"from tensorflow.keras.utils import to_categorical\n", | ||
"\n", | ||
"# Split the data into training and testing sets\n", | ||
"X_train, X_test, y_train, y_test = train_test_split(padded_reviews, df['intensity'], test_size=0.2, random_state=42)\n", | ||
"\n", | ||
"# Build the model with Dropout layers\n", | ||
"model = Sequential()\n", | ||
"model.add(Embedding(vocab_size, embed_vector_size, input_length=max_length, name=\"embedding\"))\n", | ||
"model.add(Flatten())\n", | ||
"model.add(Dropout(0.5)) # Adding dropout with a dropout rate of 0.5\n", | ||
"model.add(Dense(4, activation='softmax')) # Assuming you have 4 classes\n", | ||
"\n", | ||
"# Compile the model\n", | ||
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n", | ||
"\n", | ||
"# Train the model on the oversampled data\n", | ||
"model.fit(X_train, to_categorical(y_train,num_classes=4), epochs=5, validation_data=(X_test, to_categorical(y_test, num_classes=4)))\n", | ||
"\n", | ||
"# Evaluate the model\n", | ||
"loss, accuracy = model.evaluate(X_test, to_categorical(y_test, num_classes=4))\n", | ||
"print(f\"Accuracy on Test Set: {accuracy}\")\n" | ||
] | ||
}, | ||
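{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `intensity` column holds signed labels (the tail printed above shows 0, -1 and -2), yet `to_categorical(..., num_classes=4)` is called on it directly. A small sketch of the resulting mapping, assuming the four raw labels are {-2, -1, 0, 1}: negative values index from the end of the one-hot row, so each raw label still gets its own distinct column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.utils import to_categorical\n",
"import numpy as np\n",
"\n",
"labels = np.array([-2, -1, 0, 1])  # assumed set of raw intensity values\n",
"print(to_categorical(labels, num_classes=4))\n",
"# -> [[0. 0. 1. 0.]   -2 maps to column 2 (negative indexing)\n",
"#     [0. 0. 0. 1.]   -1 maps to column 3\n",
"#     [1. 0. 0. 0.]    0 maps to column 0\n",
"#     [0. 1. 0. 0.]]   1 maps to column 1"
]
},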
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "XgP6hLYOpoBZ",
"outputId": "6ec0cd75-23bb-441c-93ad-7e6250ff9952"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/15\n",
"260/260 [==============================] - 15s 39ms/step - loss: 1.1118 - accuracy: 0.5254 - val_loss: 1.0045 - val_accuracy: 0.6123\n",
"Epoch 2/15\n",
"260/260 [==============================] - 6s 22ms/step - loss: 1.0912 - accuracy: 0.4992 - val_loss: 1.0932 - val_accuracy: 0.4598\n",
"Epoch 3/15\n",
"260/260 [==============================] - 5s 18ms/step - loss: 1.0079 - accuracy: 0.5809 - val_loss: 0.8843 - val_accuracy: 0.6494\n",
"Epoch 4/15\n",
"260/260 [==============================] - 6s 22ms/step - loss: 0.8847 - accuracy: 0.6542 - val_loss: 0.8469 - val_accuracy: 0.6522\n",
"Epoch 5/15\n",
"260/260 [==============================] - 4s 17ms/step - loss: 0.8212 - accuracy: 0.6751 - val_loss: 0.7992 - val_accuracy: 0.6888\n",
"Epoch 6/15\n",
"260/260 [==============================] - 4s 17ms/step - loss: 0.7563 - accuracy: 0.7058 - val_loss: 0.7528 - val_accuracy: 0.6931\n",
"Epoch 7/15\n",
"260/260 [==============================] - 6s 21ms/step - loss: 0.7399 - accuracy: 0.7131 - val_loss: 0.7206 - val_accuracy: 0.7003\n",
"Epoch 8/15\n",
"260/260 [==============================] - 4s 17ms/step - loss: 0.6795 - accuracy: 0.7378 - val_loss: 0.7090 - val_accuracy: 0.7095\n",
"Epoch 9/15\n",
"260/260 [==============================] - 5s 20ms/step - loss: 0.6676 - accuracy: 0.7437 - val_loss: 0.6944 - val_accuracy: 0.7109\n",
"Epoch 10/15\n",
"260/260 [==============================] - 5s 19ms/step - loss: 0.6346 - accuracy: 0.7564 - val_loss: 0.7330 - val_accuracy: 0.7114\n",
"Epoch 11/15\n",
"260/260 [==============================] - 4s 17ms/step - loss: 0.6257 - accuracy: 0.7616 - val_loss: 0.6922 - val_accuracy: 0.7085\n",
"Epoch 12/15\n",
"260/260 [==============================] - 5s 20ms/step - loss: 0.6012 - accuracy: 0.7736 - val_loss: 0.7449 - val_accuracy: 0.6955\n",
"Epoch 13/15\n",
"260/260 [==============================] - 5s 18ms/step - loss: 0.5886 - accuracy: 0.7785 - val_loss: 0.6919 - val_accuracy: 0.7133\n",
"Epoch 14/15\n",
"260/260 [==============================] - 5s 18ms/step - loss: 0.5754 - accuracy: 0.7824 - val_loss: 0.7149 - val_accuracy: 0.7172\n",
"Epoch 15/15\n",
"260/260 [==============================] - 6s 21ms/step - loss: 0.5603 - accuracy: 0.7902 - val_loss: 0.7128 - val_accuracy: 0.7201\n",
"65/65 [==============================] - 0s 7ms/step - loss: 0.7128 - accuracy: 0.7201\n",
"Accuracy on Test Set: 0.7200577259063721\n"
]
}
],
"source": [
"#DOUBLE LAYERED LSTM \n", | ||
"\n", | ||
"from tensorflow.keras.layers import LSTM\n", | ||
"\n", | ||
"# Build the model with LSTM and Dropout layers\n", | ||
"model = Sequential()\n", | ||
"model.add(Embedding(vocab_size, embed_vector_size, input_length=max_length, name=\"embedding\"))\n", | ||
"model.add(LSTM(64, return_sequences=True))\n", | ||
"model.add(LSTM(32))\n", | ||
"model.add(Dropout(0.2)) # Adding dropout with a dropout rate of 0.2\n", | ||
"model.add(Dense(4, activation='softmax')) # Assuming you have 4 classes\n", | ||
"\n", | ||
"# Compile the model\n", | ||
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # Use categorical_crossentropy for classification\n", | ||
"\n", | ||
"# Train the model on the oversampled data\n", | ||
"model.fit(X_train, to_categorical(y_train, num_classes=4), epochs=15, validation_data=(X_test, to_categorical(y_test, num_classes=4)))\n", | ||
"\n", | ||
"# Evaluate the model\n", | ||
"loss, accuracy = model.evaluate(X_test, to_categorical(y_test, num_classes=4))\n", | ||
"print(f\"Accuracy on Test Set: {accuracy}\")\n" | ||
] | ||
}, | ||
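{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal inference sketch for whichever model was trained last, assuming `tokenizer`, `clean_text`, `max_length` and `model` from the cells above are in scope; the example post is made up. The argmax column is translated back to a raw intensity by inverting the to_categorical mapping ({0: 0, 1: 1, 2: -2, 3: -1})."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical new post, preprocessed exactly like the training data\n",
"new_post = clean_text(\"my father was just diagnosed and I am terrified\")\n",
"x = pad_sequences(tokenizer.texts_to_sequences([new_post]), maxlen=max_length, padding='post')\n",
"\n",
"probs = model.predict(x)  # shape (1, 4): softmax over the 4 one-hot columns\n",
"column_to_intensity = {0: 0, 1: 1, 2: -2, 3: -1}\n",
"print(column_to_intensity[int(np.argmax(probs))])"
]
},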
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "RPUWSzSY1Vjo",
"outputId": "0239db05-8953-4f76-bbd2-ca9017d9a4c5"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/15\n",
"260/260 [==============================] - 15s 45ms/step - loss: 1.1295 - accuracy: 0.4896 - val_loss: 0.9466 - val_accuracy: 0.6219\n",
"Epoch 2/15\n",
"260/260 [==============================] - 5s 17ms/step - loss: 0.9124 - accuracy: 0.6427 - val_loss: 0.8370 - val_accuracy: 0.6652\n",
"Epoch 3/15\n",
"260/260 [==============================] - 6s 22ms/step - loss: 0.8356 - accuracy: 0.6738 - val_loss: 0.8008 - val_accuracy: 0.6734\n",
"Epoch 4/15\n",
"260/260 [==============================] - 6s 22ms/step - loss: 0.7830 - accuracy: 0.6951 - val_loss: 0.8152 - val_accuracy: 0.6537\n",
"Epoch 5/15\n",
"260/260 [==============================] - 5s 19ms/step - loss: 0.7538 - accuracy: 0.7119 - val_loss: 0.7658 - val_accuracy: 0.7003\n",
"Epoch 6/15\n",
"260/260 [==============================] - 5s 19ms/step - loss: 0.7325 - accuracy: 0.7263 - val_loss: 0.7631 - val_accuracy: 0.7018\n",
"Epoch 7/15\n",
"260/260 [==============================] - 5s 18ms/step - loss: 0.7165 - accuracy: 0.7329 - val_loss: 0.7431 - val_accuracy: 0.7003\n",
"Epoch 8/15\n",
"260/260 [==============================] - 5s 21ms/step - loss: 0.6951 - accuracy: 0.7374 - val_loss: 0.7777 - val_accuracy: 0.6922\n",
"Epoch 9/15\n",
"260/260 [==============================] - 5s 17ms/step - loss: 0.6751 - accuracy: 0.7427 - val_loss: 0.7564 - val_accuracy: 0.7023\n",
"Epoch 10/15\n",
"260/260 [==============================] - 4s 17ms/step - loss: 0.6509 - accuracy: 0.7465 - val_loss: 0.7279 - val_accuracy: 0.7047\n",
"Epoch 11/15\n",
"260/260 [==============================] - 5s 21ms/step - loss: 0.6305 - accuracy: 0.7592 - val_loss: 0.7105 - val_accuracy: 0.7013\n",
"Epoch 12/15\n",
"260/260 [==============================] - 5s 18ms/step - loss: 0.6107 - accuracy: 0.7707 - val_loss: 0.7059 - val_accuracy: 0.7133\n",
"Epoch 13/15\n",
"260/260 [==============================] - 5s 20ms/step - loss: 0.5945 - accuracy: 0.7707 - val_loss: 0.7101 - val_accuracy: 0.7076\n",
"Epoch 14/15\n",
"260/260 [==============================] - 5s 19ms/step - loss: 0.5850 - accuracy: 0.7776 - val_loss: 0.7368 - val_accuracy: 0.7037\n",
"Epoch 15/15\n",
"260/260 [==============================] - 4s 17ms/step - loss: 0.5728 - accuracy: 0.7848 - val_loss: 0.7234 - val_accuracy: 0.7124\n",
"65/65 [==============================] - 0s 7ms/step - loss: 0.7234 - accuracy: 0.7124\n",
"Accuracy on Test Set: 0.7123616933822632\n"
]
}
],
"source": [
"#GRU BASED MODEL\n", | ||
"from tensorflow.keras.layers import Embedding, GRU\n", | ||
"model = Sequential()\n", | ||
"model.add(Embedding(input_dim=vocab_size, output_dim=16, input_length=max_length))\n", | ||
"model.add(GRU(units=64, return_sequences=True))\n", | ||
"model.add(GRU(units=32))\n", | ||
"model.add(Dropout(0.2))\n", | ||
"model.add(Dense(units=4, activation='softmax'))\n", | ||
"\n", | ||
"# Compile the model\n", | ||
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n", | ||
"\n", | ||
"# Train the model on the oversampled data\n", | ||
"model.fit(X_train, to_categorical(y_train,num_classes=4), epochs=15, validation_data=(X_test, to_categorical(y_test, num_classes=4)))\n", | ||
"\n", | ||
"# Evaluate the model\n", | ||
"loss, accuracy = model.evaluate(X_test, to_categorical(y_test, num_classes=4))\n", | ||
"print(f\"Accuracy on Test Set: {accuracy}\")\n" | ||
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "T4",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
} |