From 6d93095d5d0c1705f45ce7037ff35e7c3df726be Mon Sep 17 00:00:00 2001
From: Toren Lev Fronsdal <toren@stanford.edu>
Date: Thu, 25 Feb 2021 20:45:50 -0800
Subject: [PATCH] Scale features, add held-out evaluation, switch to BCE loss

Standardize the numeric gene/cell features in the preprocessor, hold out
20% of the training rows as a test split, train with BCEWithLogitsLoss in
place of the smoothed/clipped cross-entropy losses (kept for future use),
and report train and test multi-label log loss after fitting.
---
 baseline_model.py | 72 +++++++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 31 deletions(-)

diff --git a/baseline_model.py b/baseline_model.py
index d824440..5c0e0ba 100644
--- a/baseline_model.py
+++ b/baseline_model.py
@@ -21,19 +21,16 @@ import torch.optim as optim
 
 # sklearn
 import sklearn.base
-from sklearn.decomposition import PCA
 
 import joblib
 
 from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.feature_selection import VarianceThreshold
-from sklearn.preprocessing import OneHotEncoder
+from sklearn.preprocessing import OneHotEncoder, StandardScaler
 from sklearn.decomposition import PCA
 from sklearn.pipeline import make_union, make_pipeline
 from sklearn.compose import make_column_transformer
-
-from scipy.stats import kurtosis, skew
-
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import log_loss
 
 ########################
 ### Global variables ###
@@ -58,7 +55,7 @@ class Preprocessor(TransformerMixin):
         num_pc_genes = 80,
         num_pc_cells = 10,
         seed = 2021
-        ):
+    ):
         
         self.variance_threshold = variance_threshold
         self.num_pc_genes = num_pc_genes
@@ -68,14 +65,15 @@ class Preprocessor(TransformerMixin):
     def fit(self, X, y = None, X_test = None):
         if X_test is not None:
             X = pd.concat([X, X_test], axis = 0, ignore_index = True)
-                        
+
         gene_feats = [col for col in X.columns if col.startswith('g-')]
         cell_feats = [col for col in X.columns if col.startswith('c-')]
-        numeric_feats = gene_feats+cell_feats
+        numeric_feats = gene_feats + cell_feats
         categorical_feats = ['cp_time', 'cp_dose']        
-
+
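+        # One-hot encode the categorical features and standardize the numeric gene/cell features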
         self._transformer = make_column_transformer(
-            (OneHotEncoder(), categorical_feats)
+            (OneHotEncoder(), categorical_feats),
+            (StandardScaler(), numeric_feats)
         )
         self._transformer.fit(X)
         return self
@@ -433,8 +431,8 @@ class Network(sklearn.base.BaseEstimator):
         preds_proba = 1 / (1 + np.exp(-preds))
         return preds_proba.astype("float32")
 
-
 # %% [code]
+# FOR FUTURE USE: cross-entropy loss with label smoothing (currently unused)
 class SmoothCrossEntropyLoss(nn.modules.loss._WeightedLoss):
     """
     Computes smoothed cross entropy (log) loss.
@@ -486,7 +484,7 @@ class SmoothCrossEntropyLoss(nn.modules.loss._WeightedLoss):
 
         return loss
 
-
+# FOR FUTURE USE: cross-entropy loss with probability clipping (currently unused)
 class ClippedCrossEntropyLoss(nn.modules.loss._WeightedLoss):
     """
     Computes clipped cross entropy (log) loss.
@@ -532,66 +530,78 @@ class ClippedCrossEntropyLoss(nn.modules.loss._WeightedLoss):
 train_drug = pd.read_csv("../input/lish-moa/train_drug.csv")
 X = pd.read_csv("../input/lish-moa/train_features.csv")
 y = pd.read_csv("../input/lish-moa/train_targets_scored.csv")
-X_test = pd.read_csv("../input/lish-moa/test_features.csv")
-submission = pd.read_csv("../input/lish-moa/sample_submission.csv")
 
 # Remove control observations
 y = y.loc[X["cp_type"]=="trt_cp"].reset_index(drop=True)
 X = X.loc[X["cp_type"]=="trt_cp"].reset_index(drop=True)
-# used to set control obs. to zero for preds
-X_test_copy = X_test.copy()
 
 # %% [code]
+##########################
+### Data Preprocessing ###
+##########################
+
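+# Hold out 20% of the treatment rows as a test split for evaluation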
+X, X_test, y, y_test = train_test_split(
+    X, y, test_size=0.2, random_state=1998
+)
+
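+# Fit the preprocessor on the training split only, then apply it to both splits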
 transformer = Preprocessor() 
 transformer.fit(X)
 X = transformer.transform(X)
+X_test = transformer.transform(X_test)
 y = y.drop(["sig_id"], axis = 1).values.astype("float32")
+y_test = y_test.drop(["sig_id"], axis = 1).values.astype("float32")
 
 # %% [code]
+# Define network architecture
+
 n_input = X.shape[1]
 n_output = y.shape[1]
 hidden_units = 640
 dropout = 0.2
 
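+# Two hidden blocks of Linear -> BatchNorm -> ReLU -> Dropout, then a linear output layer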
 net_obj = Sequential(
-    nn.BatchNorm1d(n_input),
-    nn.Dropout(dropout),
     nn.Linear(n_input, hidden_units),
-    nn.ReLU(),
     nn.BatchNorm1d(hidden_units),
+    nn.ReLU(),
     nn.Dropout(dropout),
     nn.Linear(hidden_units, hidden_units),
-    nn.ReLU(),
     nn.BatchNorm1d(hidden_units),
+    nn.ReLU(),
     nn.Dropout(dropout),
     nn.Linear(hidden_units, n_output)
 )
 
 # %% [code]
-# zero the submission preds
-submission.iloc[:,1:207] = 0
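+# BCEWithLogitsLoss applies a per-target sigmoid, matching the multi-label targets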
+loss = nn.BCEWithLogitsLoss()
 
+# Initialize network
 net = Network(
     net_obj=net_obj, 
-    max_epochs=6,
+    max_epochs=20,
     batch_size=128, 
     device=device,
-    loss_fn=SmoothCrossEntropyLoss(smoothing=0.001, device=device), 
-    lr=0.001,
+    loss_fn=loss,
+    lr=0.01,
     weight_decay=1e-6,
     lr_scheduler="ReduceLROnPlateau"
 )
 
-clipped_log_loss = ClippedCrossEntropyLoss(smoothing=0.001)
 
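+# Train while tracking loss on the held-out split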
 net.fit(
     X=X, 
     y=y, 
-    eval_metric=[clipped_log_loss], 
-    patience=7,
+    eval_set=[(X_test, y_test)],
+    eval_metric=[loss],
     verbose=2
 )
 
-net.predict_proba(X)
-
 # %% [code]
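+# Element-wise binary log loss averaged over all (sample, target) pairs; 1e-15 guards log(0)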
+def multi_log_loss(y_pred, y_true):
+    losses = -y_true * np.log(y_pred + 1e-15) - (1 - y_true) * np.log(1 - y_pred + 1e-15)
+    return np.mean(losses)
+
+preds = net.predict_proba(X)
+print("Train loss: ", multi_log_loss(preds, y))
+
+test_preds = net.predict_proba(X_test)
+print("Test loss: ", multi_log_loss(test_preds, y_test))
-- 
GitLab