Deep Neural Networks with TensorFlow

1 Introduction

Here we model a multi-class target with the Keras interface, which has been integrated into TensorFlow. It allows models to be built quickly and provides graphical output on model quality.

We reuse the dataset and the preprocessing steps from the previous document on TensorFlow.

1.0.1 Data and modules

library("reticulate")

# tensorflow was installed in the conda virtual environment "tf_env"
use_condaenv(condaenv = "tf_env")

data(IncomeESL, package = "arules")

dtf_class = tidyr::drop_na(IncomeESL)
colnames(dtf_class) = gsub(" ", "_", colnames(dtf_class))

dtf_class = dtf_class[c("number_in_household", "marital_status", "householder_status", "income")]

dtf_class$number_in_household = as.character(dtf_class$number_in_household)
dtf_class$number_in_household[dtf_class$number_in_household == "9+"] = "9"
dtf_class$number_in_household = as.integer(dtf_class$number_in_household)

for (col_quali in colnames(dtf_class)[sapply(dtf_class, is.factor)]) {
  dtf_class[[col_quali]] = as.character(dtf_class[[col_quali]] )
}

dtf_class$income = gsub(",", "-", dtf_class$income)

import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)

import sys
import shutil
import pprint
pp = pprint.PrettyPrinter(indent=4)

from sklearn.model_selection import train_test_split

import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers

# to work around a fairly common graphics bug with Anaconda,
# adapt the path to the 'plugins/platforms' folder
import os
from os import path
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = 'C:/Users/Sebastien/Anaconda3/Library/plugins/platforms'
os.chdir(".")

import matplotlib.pyplot as plt
import seaborn as sns
sns.set()

tf.random.set_seed(2021)

The Python and TensorFlow versions used.

sys.version
tf.__version__
'3.9.6 (default, Jul 30 2021, 11:42:22) [MSC v.1916 64 bit (AMD64)]'
'2.6.0'

The data as seen from Python.

dtf_class = r.dtf_class

dtf_class.head()
   number_in_household marital_status        householder_status   income
0                    5        married                       own      75+
1                    3        married                      rent      75+
2                    4         single  live with parents/family   [0-10)
3                    4         single  live with parents/family   [0-10)
4                    2        married                       own  [50-75)

Frequencies of the target levels.

dtf_class.income.value_counts(normalize=True).sort_index()
75+        0.108057
[0-10)     0.182519
[10-15)    0.076934
[15-20)    0.073444
[20-25)    0.089878
[25-30)    0.076643
[30-40)    0.123037
[40-50)    0.114020
[50-75)    0.155468
Name: income, dtype: float64

Dictionaries used to recode the target variable.

# mappings to recode the target levels as integers
dico_income = {'[0-10)':0,'[10-15)':1,'[15-20)':2, '[20-25)':3,'[25-30)':4, '[30-40)':5, '[40-50)':6, '[50-75)':7,'75+':8}
dico_income_bin = {'[0-10)':0,'[10-15)':0,'[15-20)':0, '[20-25)':0,'[25-30)':0, '[30-40)':1, '[40-50)':1, '[50-75)':1,'75+':1}
pp.pprint(dico_income)
pp.pprint(dico_income_bin)
{   '75+': 8,
    '[0-10)': 0,
    '[10-15)': 1,
    '[15-20)': 2,
    '[20-25)': 3,
    '[25-30)': 4,
    '[30-40)': 5,
    '[40-50)': 6,
    '[50-75)': 7}
{   '75+': 1,
    '[0-10)': 0,
    '[10-15)': 0,
    '[15-20)': 0,
    '[20-25)': 0,
    '[25-30)': 0,
    '[30-40)': 1,
    '[40-50)': 1,
    '[50-75)': 1}

1.0.2 Column preprocessing

feature_columns = []

# numeric
feature_columns.append(feature_column.numeric_column("number_in_household"))

# bucketized
number_in_household = feature_column.numeric_column("number_in_household")
number_bucket = tf.feature_column.bucketized_column(number_in_household, boundaries = [3, 5, 7])
feature_columns.append(number_bucket)

# one-hot indicators
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
  'marital_status', ['single', 'married', 'divorced', 'cohabitation', 'widowed'])
marital_one_hot = tf.feature_column.indicator_column(marital_status)
feature_columns.append(marital_one_hot)

# embedding
householder_status = tf.feature_column.categorical_column_with_vocabulary_list(
  'householder_status', ['own', 'rent', 'live with parents/family'])
householder_embedding = tf.feature_column.embedding_column(householder_status, dimension = 2)
feature_columns.append(householder_embedding)

# interactions
interactions = tf.feature_column.crossed_column([marital_status, number_bucket], 
hash_bucket_size = 7, hash_key = 8)
interactions = tf.feature_column.indicator_column(interactions)
feature_columns.append(interactions)

A dictionary, for the readability of the operations performed; technically a plain list would suffice.

feature_columns = {"number_in_household":feature_column.numeric_column("number_in_household"),
  "number_bucket": number_bucket,
  "marital_one_hot": marital_one_hot,
  "householder_embedding": householder_embedding,
  "interactions": interactions
  }
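To see what these transformations actually produce, one can apply a DenseFeatures layer to a single batch of raw data. A minimal sketch, assuming a batched tf.data.Dataset such as the train_ds built in section 2.0.3 below:

# sketch: apply one feature column to an example batch
# (assumes train_ds is a batched tf.data.Dataset as built in section 2.0.3)
example_batch = next(iter(train_ds))[0]
demo_layer = layers.DenseFeatures([feature_columns["marital_one_hot"]])
print(demo_layer(example_batch).numpy()[:3])  # one-hot rows for the first 3 examples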

1.0.3 Data-reading function

By default the target is treated as multi-class and its levels are recoded from 0 to 8. With dico = dico_income_bin the target is treated as binary and recoded as 0/1.

# build a tf.data.Dataset from the DataFrame
def df_to_dataset(dataframe, shuffle = True, batch_size = 32, dico = dico_income, nb_repet = None):
  dataframe = dataframe.copy()
  labels = dataframe.pop('income').map(dico)
  ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
  if shuffle:
    # shuffle across all rows of the dataframe, since there are only a few thousand of them
    ds = ds.shuffle(buffer_size = len(dataframe))
  ds = ds.repeat(count = nb_repet).batch(batch_size)
  return ds
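A quick way to check the function is to read back a single small batch; a minimal sketch:

# read one batch of 5 rows to verify the pipeline
demo_ds = df_to_dataset(dtf_class, batch_size = 5)
features, labels = next(iter(demo_ds))
print(list(features.keys()))  # the three predictor columns
print(labels.numpy())         # income recoded as integers 0 to 8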

2 Sequential modeling with Keras

2.0.1 Train, test, validation

The three datasets: one for training, one for validation, and one for the final test.

# test = 20% of the total
train, test = train_test_split(dtf_class, test_size=0.2, random_state = 2021)
# val = 25% of train = 20% of the total
train, val = train_test_split(train, test_size=0.25, random_state = 2021)

2.0.2 Parameters and metrics

2.0.2.1 Model architecture

A neural network built with Keras Sequential is a stack of layers; here we have:

  • a DenseFeatures layer that converts the inputs into tensors
  • two hidden layers with 16 and 8 neurons respectively, using the standard "relu" activation for hidden layers
  • a Dropout layer applied to the previous 8-neuron layer. It helps fight overfitting by dropping each of the 8 neurons with probability 20% during training. It only brings real benefit for truly deep networks, i.e. those with many hidden layers.
  • an output layer with one score per target level. The softmax function generalizes the sigmoid, which only applies to binary targets (see the sketch below).

The number of hidden layers, the number of neurons per layer and the dropout rate are all hyperparameters to optimize.
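As an illustration of the output layer, softmax turns a vector of scores into a probability distribution, and with two classes it reduces to the sigmoid; a minimal sketch:

# softmax maps scores to probabilities that sum to 1
logits = tf.constant([[1.0, 2.0, 0.5]])
probas = tf.nn.softmax(logits)
print(probas.numpy(), tf.reduce_sum(probas).numpy())
# with two scores (z, 0), the first softmax component equals sigmoid(z)
print(tf.nn.softmax([[1.5, 0.0]]).numpy()[0, 0], tf.math.sigmoid(1.5).numpy())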

modele_softmax = tf.keras.Sequential([
  layers.DenseFeatures(feature_columns = feature_columns.values()),
  layers.Dense(16, activation = 'relu', name = "hidden1"),
  layers.Dense(8, activation = 'relu', name = "hidden2"),
  layers.Dropout(0.2),
  layers.Dense(len(dico_income), activation = 'softmax', name = "output")
])

A variant with a binary target.

modele_binaire = tf.keras.Sequential([
  layers.DenseFeatures(feature_columns = feature_columns.values()),
  layers.Dense(16, activation = 'relu', name = "hidden1"),
  layers.Dense(8, activation = 'relu', name = "hidden2"),
  layers.Dropout(0.2),
  layers.Dense(1, activation = 'sigmoid', name = "output")
])

2.0.2.2 Other parameters

We set the values of several parameters in the compile call further below:

  • the optimizer hyperparameter: the choice of gradient-descent algorithm; they do not all take the same parameters. For instance the Ftrl optimizer offers regularization ratios, see https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Ftrl
  • the loss hyperparameter: the loss function minimized by gradient descent; the "cross-entropy" is nothing other than the negative log-likelihood
    • tf.keras.losses.BinaryCrossentropy (or 'binary_crossentropy') for a binary 0/1 target
    • tf.keras.losses.CategoricalCrossentropy (or 'categorical_crossentropy') for a multi-class target in "one indicator per class" format
    • tf.keras.losses.SparseCategoricalCrossentropy (or 'sparse_categorical_crossentropy') for a multi-class target in "integer column" format
  • metrics: the usual business metrics for assessing model performance. Metrics are not hyperparameters of the model: they play no part in training it. For a binary target they include, among others,
    • the AUC
    • accuracy
    • precision
    • recall
    • the counting metrics of the confusion matrix (true positives, false negatives, ...)

Metrics for the binary-target case.

metriques_bin = [tf.keras.metrics.AUC(name='auc'),
  tf.keras.metrics.BinaryAccuracy(name='accuracy'),
  tf.keras.metrics.Precision(name='precision'),
  tf.keras.metrics.Recall(name='recall'),
  tf.keras.metrics.TruePositives(name='tp'),
  tf.keras.metrics.FalsePositives(name='fp'),
  tf.keras.metrics.TrueNegatives(name='tn'),
  tf.keras.metrics.FalseNegatives(name='fn')
  ]

2.0.2.3 Dependencies between hyperparameters

We follow the recommendations of the article https://medium.com/google-cloud/ml-design-pattern-3-virtual-epochs-f842296de730, which advises against choosing the number of epochs or steps (i.e. of mini-batches) directly, and instead recommends choosing

  • the mini-batch size: TRAIN_BATCH_SIZE
  • the total number of examples shown to the model during training: NUM_TRAIN_EXAMPLES
  • the number of checkpoints; at each checkpoint we store the model's performance, which we then plot: NUM_CHECKPOINTS

Here the "epochs" are virtual and do not correspond exactly to a full pass over the dataframe, as noted in the documentation https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit

tf.random.set_seed(2021)

TRAIN_BATCH_SIZE = 64
NUM_TRAIN_EXAMPLES = 200000
NUM_CHECKPOINTS = 20

steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_CHECKPOINTS)
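With these values, each virtual epoch covers 200000 // (64 * 20) = 156 steps of 64 examples, which matches the 156/156 progress displayed during training below.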

2.0.3 Training the models

We first train the multi-class model. Unlike the training set, the validation and test datasets do not need to be read several times or shuffled.

train_ds = df_to_dataset(train, batch_size = TRAIN_BATCH_SIZE)
val_ds = df_to_dataset(val, shuffle = False, batch_size = TRAIN_BATCH_SIZE, nb_repet = 1)
test_ds = df_to_dataset(test, shuffle = False, batch_size = TRAIN_BATCH_SIZE, nb_repet = 1)
    
# configure the model
modele_softmax.compile(optimizer = 'adam',
              loss = tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics = ['accuracy'])
    
history_softmax = modele_softmax.fit(x = train_ds,
                    steps_per_epoch = steps_per_epoch,
                    epochs = NUM_CHECKPOINTS,
                    validation_data = val_ds
                   )
                   
modele_softmax.summary()
Epoch 1/20
156/156 [==============================] - 2s 4ms/step - loss: 2.1483 - accuracy: 0.1792 - val_loss: 2.0989 - val_accuracy: 0.2284
Epoch 2/20
156/156 [==============================] - 0s 2ms/step - loss: 2.0454 - accuracy: 0.2353 - val_loss: 2.0053 - val_accuracy: 0.2458
Epoch 3/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9865 - accuracy: 0.2613 - val_loss: 1.9649 - val_accuracy: 0.2705
Epoch 4/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9555 - accuracy: 0.2741 - val_loss: 1.9470 - val_accuracy: 0.2698
Epoch 5/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9430 - accuracy: 0.2799 - val_loss: 1.9373 - val_accuracy: 0.2676
Epoch 6/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9366 - accuracy: 0.2837 - val_loss: 1.9336 - val_accuracy: 0.2676
Epoch 7/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9304 - accuracy: 0.2874 - val_loss: 1.9281 - val_accuracy: 0.2705
Epoch 8/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9253 - accuracy: 0.2896 - val_loss: 1.9262 - val_accuracy: 0.2749
Epoch 9/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9250 - accuracy: 0.2919 - val_loss: 1.9256 - val_accuracy: 0.2749
Epoch 10/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9178 - accuracy: 0.2942 - val_loss: 1.9242 - val_accuracy: 0.2742
Epoch 11/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9231 - accuracy: 0.2912 - val_loss: 1.9219 - val_accuracy: 0.2742
Epoch 12/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9220 - accuracy: 0.2936 - val_loss: 1.9223 - val_accuracy: 0.2749
Epoch 13/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9104 - accuracy: 0.2996 - val_loss: 1.9215 - val_accuracy: 0.2742
Epoch 14/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9144 - accuracy: 0.2998 - val_loss: 1.9208 - val_accuracy: 0.2742
Epoch 15/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9084 - accuracy: 0.2960 - val_loss: 1.9197 - val_accuracy: 0.2764
Epoch 16/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9199 - accuracy: 0.2923 - val_loss: 1.9207 - val_accuracy: 0.2764
Epoch 17/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9102 - accuracy: 0.2982 - val_loss: 1.9197 - val_accuracy: 0.2771
Epoch 18/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9093 - accuracy: 0.2961 - val_loss: 1.9201 - val_accuracy: 0.2749
Epoch 19/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9064 - accuracy: 0.2994 - val_loss: 1.9185 - val_accuracy: 0.2742
Epoch 20/20
156/156 [==============================] - 0s 2ms/step - loss: 1.9085 - accuracy: 0.2956 - val_loss: 1.9181 - val_accuracy: 0.2764

WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'dict'> input: {'number_in_household': <tf.Tensor 'ExpandDims_2:0' shape=(None, 1) dtype=int32>, 'marital_status': <tf.Tensor 'ExpandDims_1:0' shape=(None, 1) dtype=string>, 'householder_status': <tf.Tensor 'ExpandDims:0' shape=(None, 1) dtype=string>}
Consider rewriting this model with the Functional API.
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_features (DenseFeature multiple                  6         
_________________________________________________________________
hidden1 (Dense)              multiple                  304       
_________________________________________________________________
hidden2 (Dense)              multiple                  136       
_________________________________________________________________
dropout (Dropout)            multiple                  0         
_________________________________________________________________
output (Dense)               multiple                  81        
=================================================================
Total params: 527
Trainable params: 527
Non-trainable params: 0
_________________________________________________________________

We now train the binary model, choosing a learning rate of 0.01 and adding regularization via "early stopping" based on the AUC: training stops when this metric on the validation sample fails to improve over a run of 10 virtual epochs.

train_ds_bin = df_to_dataset(train, batch_size = TRAIN_BATCH_SIZE, dico = dico_income_bin)
val_ds_bin = df_to_dataset(val, shuffle = False, batch_size = TRAIN_BATCH_SIZE, 
dico = dico_income_bin, nb_repet = 1)
test_ds_bin = df_to_dataset(test, shuffle = False, batch_size = TRAIN_BATCH_SIZE, 
dico = dico_income_bin, nb_repet = 1)

modele_binaire.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 0.01),
              loss = 'binary_crossentropy',
              metrics = metriques_bin)
              
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor = 'val_auc', 
    verbose = 1,
    patience = 10,
    mode = 'max',
    restore_best_weights = True)
    
history_binaire = modele_binaire.fit(x = train_ds_bin,
                    steps_per_epoch = steps_per_epoch,
                    epochs = NUM_CHECKPOINTS,
                    validation_data = val_ds_bin,
                    callbacks = [early_stopping],
                    verbose = 2
                   )
                   
modele_binaire.summary()
Epoch 1/20
156/156 - 3s - loss: 0.5906 - auc: 0.7517 - accuracy: 0.7043 - precision: 0.7227 - recall: 0.6689 - tp: 3359.0000 - fp: 1289.0000 - tn: 3673.0000 - fn: 1663.0000 - val_loss: 0.5517 - val_auc: 0.7851 - val_accuracy: 0.7389 - val_precision: 0.7489 - val_recall: 0.7217 - val_tp: 498.0000 - val_fp: 167.0000 - val_tn: 518.0000 - val_fn: 192.0000
Epoch 2/20
156/156 - 0s - loss: 0.5721 - auc: 0.7679 - accuracy: 0.7134 - precision: 0.7366 - recall: 0.6708 - tp: 3372.0000 - fp: 1206.0000 - tn: 3751.0000 - fn: 1655.0000 - val_loss: 0.5514 - val_auc: 0.7866 - val_accuracy: 0.7367 - val_precision: 0.7515 - val_recall: 0.7101 - val_tp: 490.0000 - val_fp: 162.0000 - val_tn: 523.0000 - val_fn: 200.0000
Epoch 3/20
156/156 - 0s - loss: 0.5684 - auc: 0.7714 - accuracy: 0.7218 - precision: 0.7443 - recall: 0.6828 - tp: 3437.0000 - fp: 1181.0000 - tn: 3769.0000 - fn: 1597.0000 - val_loss: 0.5533 - val_auc: 0.7825 - val_accuracy: 0.7360 - val_precision: 0.7429 - val_recall: 0.7246 - val_tp: 500.0000 - val_fp: 173.0000 - val_tn: 512.0000 - val_fn: 190.0000
Epoch 4/20
156/156 - 0s - loss: 0.5671 - auc: 0.7742 - accuracy: 0.7225 - precision: 0.7462 - recall: 0.6811 - tp: 3428.0000 - fp: 1166.0000 - tn: 3785.0000 - fn: 1605.0000 - val_loss: 0.5507 - val_auc: 0.7850 - val_accuracy: 0.7367 - val_precision: 0.7611 - val_recall: 0.6928 - val_tp: 478.0000 - val_fp: 150.0000 - val_tn: 535.0000 - val_fn: 212.0000
Epoch 5/20
156/156 - 0s - loss: 0.5650 - auc: 0.7748 - accuracy: 0.7249 - precision: 0.7517 - recall: 0.6781 - tp: 3412.0000 - fp: 1127.0000 - tn: 3825.0000 - fn: 1620.0000 - val_loss: 0.5504 - val_auc: 0.7821 - val_accuracy: 0.7375 - val_precision: 0.7632 - val_recall: 0.6913 - val_tp: 477.0000 - val_fp: 148.0000 - val_tn: 537.0000 - val_fn: 213.0000
Epoch 6/20
156/156 - 0s - loss: 0.5659 - auc: 0.7753 - accuracy: 0.7257 - precision: 0.7501 - recall: 0.6776 - tp: 3386.0000 - fp: 1128.0000 - tn: 3859.0000 - fn: 1611.0000 - val_loss: 0.5507 - val_auc: 0.7878 - val_accuracy: 0.7331 - val_precision: 0.7931 - val_recall: 0.6333 - val_tp: 437.0000 - val_fp: 114.0000 - val_tn: 571.0000 - val_fn: 253.0000
Epoch 7/20
156/156 - 0s - loss: 0.5628 - auc: 0.7782 - accuracy: 0.7274 - precision: 0.7576 - recall: 0.6831 - tp: 3473.0000 - fp: 1111.0000 - tn: 3789.0000 - fn: 1611.0000 - val_loss: 0.5497 - val_auc: 0.7855 - val_accuracy: 0.7302 - val_precision: 0.7764 - val_recall: 0.6493 - val_tp: 448.0000 - val_fp: 129.0000 - val_tn: 556.0000 - val_fn: 242.0000
Epoch 8/20
156/156 - 0s - loss: 0.5645 - auc: 0.7756 - accuracy: 0.7255 - precision: 0.7504 - recall: 0.6809 - tp: 3421.0000 - fp: 1138.0000 - tn: 3822.0000 - fn: 1603.0000 - val_loss: 0.5505 - val_auc: 0.7832 - val_accuracy: 0.7389 - val_precision: 0.7519 - val_recall: 0.7159 - val_tp: 494.0000 - val_fp: 163.0000 - val_tn: 522.0000 - val_fn: 196.0000
Epoch 9/20
156/156 - 0s - loss: 0.5636 - auc: 0.7767 - accuracy: 0.7274 - precision: 0.7462 - recall: 0.6970 - tp: 3513.0000 - fp: 1195.0000 - tn: 3749.0000 - fn: 1527.0000 - val_loss: 0.5476 - val_auc: 0.7876 - val_accuracy: 0.7375 - val_precision: 0.7527 - val_recall: 0.7101 - val_tp: 490.0000 - val_fp: 161.0000 - val_tn: 524.0000 - val_fn: 200.0000
Epoch 10/20
156/156 - 0s - loss: 0.5674 - auc: 0.7713 - accuracy: 0.7272 - precision: 0.7467 - recall: 0.6900 - tp: 3455.0000 - fp: 1172.0000 - tn: 3805.0000 - fn: 1552.0000 - val_loss: 0.5489 - val_auc: 0.7872 - val_accuracy: 0.7382 - val_precision: 0.7644 - val_recall: 0.6913 - val_tp: 477.0000 - val_fp: 147.0000 - val_tn: 538.0000 - val_fn: 213.0000
Epoch 11/20
156/156 - 0s - loss: 0.5646 - auc: 0.7766 - accuracy: 0.7254 - precision: 0.7491 - recall: 0.6897 - tp: 3494.0000 - fp: 1170.0000 - tn: 3748.0000 - fn: 1572.0000 - val_loss: 0.5603 - val_auc: 0.7823 - val_accuracy: 0.7345 - val_precision: 0.7379 - val_recall: 0.7304 - val_tp: 504.0000 - val_fp: 179.0000 - val_tn: 506.0000 - val_fn: 186.0000
Epoch 12/20
156/156 - 0s - loss: 0.5636 - auc: 0.7767 - accuracy: 0.7237 - precision: 0.7404 - recall: 0.6918 - tp: 3466.0000 - fp: 1215.0000 - tn: 3759.0000 - fn: 1544.0000 - val_loss: 0.5511 - val_auc: 0.7886 - val_accuracy: 0.7382 - val_precision: 0.7492 - val_recall: 0.7188 - val_tp: 496.0000 - val_fp: 166.0000 - val_tn: 519.0000 - val_fn: 194.0000
Epoch 13/20
156/156 - 0s - loss: 0.5638 - auc: 0.7764 - accuracy: 0.7260 - precision: 0.7477 - recall: 0.6914 - tp: 3491.0000 - fp: 1178.0000 - tn: 3757.0000 - fn: 1558.0000 - val_loss: 0.5490 - val_auc: 0.7860 - val_accuracy: 0.7353 - val_precision: 0.7426 - val_recall: 0.7232 - val_tp: 499.0000 - val_fp: 173.0000 - val_tn: 512.0000 - val_fn: 191.0000
Epoch 14/20
156/156 - 0s - loss: 0.5605 - auc: 0.7788 - accuracy: 0.7276 - precision: 0.7452 - recall: 0.6965 - tp: 3497.0000 - fp: 1196.0000 - tn: 3767.0000 - fn: 1524.0000 - val_loss: 0.5482 - val_auc: 0.7891 - val_accuracy: 0.7382 - val_precision: 0.7500 - val_recall: 0.7174 - val_tp: 495.0000 - val_fp: 165.0000 - val_tn: 520.0000 - val_fn: 195.0000
Epoch 15/20
156/156 - 0s - loss: 0.5631 - auc: 0.7772 - accuracy: 0.7260 - precision: 0.7455 - recall: 0.6912 - tp: 3472.0000 - fp: 1185.0000 - tn: 3776.0000 - fn: 1551.0000 - val_loss: 0.5514 - val_auc: 0.7890 - val_accuracy: 0.7389 - val_precision: 0.7527 - val_recall: 0.7145 - val_tp: 493.0000 - val_fp: 162.0000 - val_tn: 523.0000 - val_fn: 197.0000
Epoch 16/20
156/156 - 0s - loss: 0.5617 - auc: 0.7800 - accuracy: 0.7275 - precision: 0.7496 - recall: 0.6908 - tp: 3481.0000 - fp: 1163.0000 - tn: 3782.0000 - fn: 1558.0000 - val_loss: 0.5511 - val_auc: 0.7874 - val_accuracy: 0.7382 - val_precision: 0.7636 - val_recall: 0.6928 - val_tp: 478.0000 - val_fp: 148.0000 - val_tn: 537.0000 - val_fn: 212.0000
Epoch 17/20
156/156 - 0s - loss: 0.5619 - auc: 0.7786 - accuracy: 0.7250 - precision: 0.7457 - recall: 0.6888 - tp: 3464.0000 - fp: 1181.0000 - tn: 3774.0000 - fn: 1565.0000 - val_loss: 0.5539 - val_auc: 0.7851 - val_accuracy: 0.7396 - val_precision: 0.7500 - val_recall: 0.7217 - val_tp: 498.0000 - val_fp: 166.0000 - val_tn: 519.0000 - val_fn: 192.0000
Epoch 18/20
156/156 - 0s - loss: 0.5634 - auc: 0.7780 - accuracy: 0.7271 - precision: 0.7425 - recall: 0.6979 - tp: 3495.0000 - fp: 1212.0000 - tn: 3764.0000 - fn: 1513.0000 - val_loss: 0.5502 - val_auc: 0.7883 - val_accuracy: 0.7396 - val_precision: 0.7643 - val_recall: 0.6957 - val_tp: 480.0000 - val_fp: 148.0000 - val_tn: 537.0000 - val_fn: 210.0000
Epoch 19/20
156/156 - 0s - loss: 0.5592 - auc: 0.7823 - accuracy: 0.7296 - precision: 0.7499 - recall: 0.6992 - tp: 3536.0000 - fp: 1179.0000 - tn: 3748.0000 - fn: 1521.0000 - val_loss: 0.5489 - val_auc: 0.7895 - val_accuracy: 0.7360 - val_precision: 0.7437 - val_recall: 0.7232 - val_tp: 499.0000 - val_fp: 172.0000 - val_tn: 513.0000 - val_fn: 191.0000
Epoch 20/20
156/156 - 0s - loss: 0.5594 - auc: 0.7797 - accuracy: 0.7279 - precision: 0.7445 - recall: 0.7065 - tp: 3581.0000 - fp: 1229.0000 - tn: 3686.0000 - fn: 1488.0000 - val_loss: 0.5527 - val_auc: 0.7862 - val_accuracy: 0.7295 - val_precision: 0.7149 - val_recall: 0.7667 - val_tp: 529.0000 - val_fp: 211.0000 - val_tn: 474.0000 - val_fn: 161.0000

WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'dict'> input: {'number_in_household': <tf.Tensor 'ExpandDims_2:0' shape=(None, 1) dtype=int32>, 'marital_status': <tf.Tensor 'ExpandDims_1:0' shape=(None, 1) dtype=string>, 'householder_status': <tf.Tensor 'ExpandDims:0' shape=(None, 1) dtype=string>}
Consider rewriting this model with the Functional API.
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_features_1 (DenseFeatu multiple                  6         
_________________________________________________________________
hidden1 (Dense)              multiple                  304       
_________________________________________________________________
hidden2 (Dense)              multiple                  136       
_________________________________________________________________
dropout_1 (Dropout)          multiple                  0         
_________________________________________________________________
output (Dense)               multiple                  9         
=================================================================
Total params: 455
Trainable params: 455
Non-trainable params: 0
_________________________________________________________________

2.0.4 Model performance

Loss and metrics on the test sample.

pd.DataFrame({"KPI": modele_softmax.metrics_names, 
"valeurs": modele_softmax.evaluate(test_ds, verbose = 0)})

pd.DataFrame({"KPI": modele_binaire.metrics_names, 
"valeurs": modele_binaire.evaluate(test_ds_bin, verbose = 0)})
        KPI   valeurs
0      loss  1.875248
1  accuracy  0.300872
         KPI     valeurs
0       loss    0.536878
1        auc    0.807006
2   accuracy    0.741279
3  precision    0.729378
4     recall    0.748886
5         tp  504.000000
6         fp  187.000000
7         tn  516.000000
8         fn  169.000000

Plots of the multi-class model's performance by virtual epoch.

dtf = pd.DataFrame(history_softmax.history)
colonnes = dtf.columns

dtf = dtf.reset_index().rename(columns = {'index': 'virtual_epochs'})
dtf_tr = pd.melt(dtf, id_vars = ['virtual_epochs'], value_vars = colonnes, 
var_name ='noms_col', value_name='valeurs_col')

dtf_tr["metrique"] = dtf_tr.noms_col.str.replace("^val_", "", regex = True)
dtf_tr["echantillon"] = "val"
dtf_tr.loc[dtf_tr.noms_col == dtf_tr.metrique, "echantillon"] = "train"

g = sns.FacetGrid(dtf_tr, col="metrique", hue = "echantillon", sharey = False, col_wrap = 2)
g = g.map_dataframe(sns.lineplot, x = "virtual_epochs",y = "valeurs_col").add_legend()
plt.show()

Plots of the binary model's performance by virtual epoch.

dtf = pd.DataFrame(history_binaire.history)
colonnes = dtf.columns

dtf = dtf.reset_index().rename(columns = {'index': 'virtual_epochs'})
dtf_tr = pd.melt(dtf, id_vars = ['virtual_epochs'], value_vars = colonnes, 
var_name ='noms_col', value_name='valeurs_col')

dtf_tr["metrique"] = dtf_tr.noms_col.str.replace("^val_", "", regex = True)
dtf_tr["echantillon"] = "val"
dtf_tr.loc[dtf_tr.noms_col == dtf_tr.metrique, "echantillon"] = "train"

g = sns.FacetGrid(dtf_tr, col="metrique", hue = "echantillon", sharey = False, col_wrap = 3)
g = g.map_dataframe(sns.lineplot, x = "virtual_epochs",y = "valeurs_col").add_legend()
plt.show()

The scores of the multi-class model.

# predict returns a numpy array
predictions = modele_softmax.predict(test_ds)
predictions.round(2)[:5]

# check: each row of predicted probabilities sums to 1
tf.reduce_sum(predictions[:5], axis = 1)
array([[0.01, 0.02, 0.03, 0.05, 0.05, 0.15, 0.17, 0.3 , 0.22],
       [0.12, 0.14, 0.11, 0.15, 0.13, 0.15, 0.11, 0.07, 0.02],
       [0.01, 0.02, 0.03, 0.05, 0.05, 0.15, 0.17, 0.3 , 0.22],
       [0.18, 0.14, 0.11, 0.14, 0.11, 0.13, 0.09, 0.07, 0.03],
       [0.06, 0.09, 0.08, 0.13, 0.12, 0.18, 0.16, 0.14, 0.06]],
      dtype=float32)
<tf.Tensor: shape=(5,), dtype=float32, numpy=array([1., 1., 1., 1., 1.], dtype=float32)>

The confusion matrix of the multi-class model on the test sample. Note that the model only ever predicts classes 0, 1, 5 and 7.

# predictions: for each example we keep the column with the highest score
prev = pd.DataFrame(predictions)
prev = list(prev.idxmax(axis = 'columns'))

# the ground truth (the labels are the 2nd component of the test dataset)
reel = pd.concat([pd.DataFrame(dts[1], columns = ["x"]) for dts in iter(test_ds)])

pd.DataFrame({'Y': list(reel.x), 'Ypred': prev}).groupby(["Y", "Ypred"]).size(
).reset_index(name = "nb").pivot(index="Y", columns="Ypred")
          nb                  
Ypred      0    1     5      7
Y                             
0      225.0  3.0  19.0    8.0
1       67.0  1.0  26.0    9.0
2       66.0  2.0  38.0   13.0
3       61.0  3.0  34.0   29.0
4       44.0  1.0  33.0   21.0
5       42.0  1.0  58.0   53.0
6       35.0  NaN  40.0   68.0
7       40.0  NaN  43.0  130.0
8       30.0  NaN  17.0  116.0
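As a cross-check, scikit-learn (already imported above for train_test_split) computes the same table; a minimal sketch:

# same counts with scikit-learn: rows = true class, columns = predicted class
from sklearn.metrics import confusion_matrix
print(confusion_matrix(list(reel.x), prev, labels = list(range(9))))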

The confusion matrix of the binary model on the test sample; its cells match the tp, fp, tn and fn counts reported by evaluate above.

prev = modele_binaire.predict(test_ds_bin).squeeze()

# choose a threshold: 0.5 is the default used by the previous count metrics (true positives, ...)
seuil = 0.5
prev = (prev >= seuil).astype(int)

# the ground truth
reel = pd.concat([pd.DataFrame(dts[1], columns = ["x"]) for dts in iter(test_ds_bin)])

pd.DataFrame({'Y': list(reel.x), 'Ypred': prev}).groupby(["Y", "Ypred"]).size(
  ).reset_index(name = "nb").pivot(index="Y", columns="Ypred")
        nb     
Ypred    0    1
Y              
0      516  187
1      169  504

3 Multi-class modeling with tf.estimator

This time we use tf.estimator rather than the Keras API; this would make it easier to chain preprocessing, modeling and deployment on the Google Cloud Platform.

Preprocessing function based on pandas_input_fn.

def make_input_fn(dataframe, shuffle= True, batch_size = TRAIN_BATCH_SIZE, 
dico = dico_income, num_epochs= None):
  dataframe = dataframe.copy()
  labels = dataframe.pop('income').map(dico)
  return tf.compat.v1.estimator.inputs.pandas_input_fn(
    x = dataframe,
    y = labels,
    batch_size = batch_size,
    num_epochs = num_epochs,
    shuffle = shuffle,
    queue_capacity = None,
    num_threads = 1
  )

We reuse the feature columns computed earlier.

def create_feature_cols():
  return feature_columns.values()

We define two classifiers:

  • LinearClassifier, which is simply a logistic regression
  • DNNClassifier, a network with several hidden layers

output_dir = os.path.join(os.getcwd(), 'tmp_tensorflow', 'trained_model')

# linear classifier
myopt = tf.compat.v1.train.FtrlOptimizer(learning_rate = 0.2, l1_regularization_strength = 0.1)
estimator_lin = tf.compat.v1.estimator.LinearClassifier(model_dir = output_dir,
  n_classes = len(dico_income), feature_columns = create_feature_cols(), optimizer = myopt)

# DNN classifier with 2 hidden layers
estimator_dnn = tf.compat.v1.estimator.DNNClassifier(hidden_units = [16, 8],
  model_dir = output_dir, n_classes = len(dico_income), feature_columns = create_feature_cols())

We set a few parameters for training and evaluation.

num_train_steps = steps_per_epoch * NUM_CHECKPOINTS

# for the training set
train_spec = tf.estimator.TrainSpec(input_fn = make_input_fn(train, shuffle=True, num_epochs=None), 
                                    max_steps = num_train_steps)

# for the validation set: evaluation starts after start_delay_secs seconds
# and then runs every throttle_secs seconds
eval_spec = tf.estimator.EvalSpec(input_fn = make_input_fn(val, shuffle = False, num_epochs = 1), 
                                  steps = None, 
                                  start_delay_secs = 2,
                                  throttle_secs = 4) 
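With steps_per_epoch = 156 and NUM_CHECKPOINTS = 20, num_train_steps = 3120, which is the global_step reported in the final evaluation below.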

We train and evaluate the two models.

# empty and remove the directory: it also serves as a checkpoint
# and its content can be reloaded to resume training
shutil.rmtree(output_dir, ignore_errors = True) 
tf.estimator.train_and_evaluate(estimator_lin, train_spec, eval_spec)

shutil.rmtree(output_dir, ignore_errors = True)
tf.estimator.train_and_evaluate(estimator_dnn, train_spec, eval_spec)
({'accuracy': 0.26763636, 'average_loss': 1.9319504, 'loss': 120.7469, 'global_step': 3120}, [])
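To score new data with a trained estimator, predict accepts the same kind of input function; a minimal sketch, assuming the DNN estimator trained above:

# per-example predictions from the trained DNN estimator
pred_iter = estimator_dnn.predict(input_fn = make_input_fn(test, shuffle = False, num_epochs = 1))
first = next(pred_iter)
print(first['class_ids'], first['probabilities'].round(2))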
