
deel.lipdp.losses module

DP_KCosineSimilarity

Bases: Loss, DP_Loss

Source code in deel/lipdp/losses.py, lines 40-62
class DP_KCosineSimilarity(Loss, DP_Loss):
    def __init__(
        self,
        K=1.0,
        axis=-1,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="cosine_similarity",
    ):
        super().__init__(reduction=reduction, name=name)
        # since epsilon is applied before the sqrt in tf.linalg.l2_normalize,
        # we square K before passing it as epsilon
        self.K = K**2
        self.axis = axis

    @tf.function
    def call(self, y_true, y_pred):
        y_true = tf.linalg.l2_normalize(y_true, epsilon=self.K, axis=self.axis)
        y_pred = tf.linalg.l2_normalize(y_pred, epsilon=self.K, axis=self.axis)
        return -tf.reduce_sum(y_true * y_pred, axis=self.axis)

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        return 1 / float(self.K)

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 60-62
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    return 1 / float(self.K)
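
Below is a minimal usage sketch, assuming the loss is applied directly to pairs of embedding tensors; the tensors and the K value are illustrative.

import tensorflow as tf
from deel.lipdp.losses import DP_KCosineSimilarity

# illustrative embeddings: batch of 2, dimension 3
y_true = tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
y_pred = tf.constant([[0.1, 0.9, 0.0], [0.8, 0.1, 0.1]])

loss_fn = DP_KCosineSimilarity(K=0.1)  # K lower-bounds the norm used for normalization
value = loss_fn(y_true, y_pred)        # reduced negative cosine similarity
lipschitz = loss_fn.get_L()            # Lipschitz constant of the loss wrt the logits
print(float(value), lipschitz)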

DP_Loss

Source code in deel/lipdp/losses.py, lines 34-37
class DP_Loss:
    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        raise NotImplementedError()

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 35-37
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    raise NotImplementedError()
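
DP_Loss is the interface that every loss in this module implements: a subclass must provide get_L() so the training pipeline can bound the gradient of the loss with respect to the logits. A hypothetical custom loss might look like the sketch below; the class name and the scaling factor are illustrative and not part of the library.

import tensorflow as tf
from deel.lipdp.losses import DP_Loss

class DP_ScaledMAE(tf.keras.losses.MeanAbsoluteError, DP_Loss):
    """Hypothetical example: mean absolute error scaled by 2."""

    def call(self, y_true, y_pred):
        return 2.0 * super().call(y_true, y_pred)

    def get_L(self):
        # MAE is 1-Lipschitz wrt the predictions, so the scaled loss is 2-Lipschitz
        return 2.0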

DP_MeanAbsoluteError

Bases: MeanAbsoluteError, DP_Loss

Source code in deel/lipdp/losses.py, lines 223-236
class DP_MeanAbsoluteError(tf.keras.losses.MeanAbsoluteError, DP_Loss):
    def __init__(
        self,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassKR",
    ):
        r"""
        Mean Absolute Error
        """
        super(DP_MeanAbsoluteError, self).__init__(reduction=reduction, name=name)

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        return 1.0

__init__(reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassKR')

Mean Absolute Error

Source code in deel/lipdp/losses.py, lines 224-232
def __init__(
    self,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="MulticlassKR",
):
    r"""
    Mean Absolute Error
    """
    super(DP_MeanAbsoluteError, self).__init__(reduction=reduction, name=name)

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 234-236
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    return 1.0
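
A minimal usage sketch with illustrative tensors:

import tensorflow as tf
from deel.lipdp.losses import DP_MeanAbsoluteError

y_true = tf.constant([[1.0], [0.0]])
y_pred = tf.constant([[0.8], [0.3]])

loss_fn = DP_MeanAbsoluteError()
print(float(loss_fn(y_true, y_pred)))  # mean |y_true - y_pred| over the batch
print(loss_fn.get_L())                 # 1.0: the loss is 1-Lipschitz wrt the logits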

DP_MulticlassHKR

Bases: MulticlassHKR, DP_Loss

Source code in deel/lipdp/losses.py, lines 120-157
class DP_MulticlassHKR(MulticlassHKR, DP_Loss):
    def __init__(
        self,
        alpha=10.0,
        min_margin=1.0,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassHKR",
    ):
        """
        The multiclass version of HKR. This is done by computing the HKR term over each
        class and averaging the results.

        Note that `y_true` should be one-hot encoded or pre-processed with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Using a multi-GPU/TPU strategy requires setting `multi_gpu` to True and
        pre-processing the labels `y_true` with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Args:
            alpha (float): regularization factor
            min_margin (float): margin to enforce.
            multi_gpu (bool): set to True when running on multi-GPU/TPU
            reduction: passed to tf.keras.Loss constructor
            name (str): passed to tf.keras.Loss constructor

        """
        super(DP_MulticlassHKR, self).__init__(
            alpha=alpha,
            min_margin=min_margin,
            multi_gpu=False,
            reduction=reduction,
            name=name,
        )

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        return self.alpha + 1.0

__init__(alpha=10.0, min_margin=1.0, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassHKR')

The multiclass version of HKR. This is done by computing the HKR term over each class and averaging the results.

Note that y_true should be one-hot encoded or pre-processed with the deel.lip.utils.process_labels_for_multi_gpu() function.

Using a multi-GPU/TPU strategy requires setting multi_gpu to True and pre-processing the labels y_true with the deel.lip.utils.process_labels_for_multi_gpu() function.

Parameters:

    alpha (float): regularization factor. Default: 10.0
    min_margin (float): margin to enforce. Default: 1.0
    multi_gpu (bool): set to True when running on multi-GPU/TPU (not exposed by this DP constructor).
    reduction: passed to tf.keras.Loss constructor. Default: SUM_OVER_BATCH_SIZE
    name (str): passed to tf.keras.Loss constructor. Default: 'MulticlassHKR'
Source code in deel/lipdp/losses.py, lines 121-153
def __init__(
    self,
    alpha=10.0,
    min_margin=1.0,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="MulticlassHKR",
):
    """
    The multiclass version of HKR. This is done by computing the HKR term over each
    class and averaging the results.

    Note that `y_true` should be one-hot encoded or pre-processed with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Using a multi-GPU/TPU strategy requires setting `multi_gpu` to True and
    pre-processing the labels `y_true` with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Args:
        alpha (float): regularization factor
        min_margin (float): margin to enforce.
        multi_gpu (bool): set to True when running on multi-GPU/TPU
        reduction: passed to tf.keras.Loss constructor
        name (str): passed to tf.keras.Loss constructor

    """
    super(DP_MulticlassHKR, self).__init__(
        alpha=alpha,
        min_margin=min_margin,
        multi_gpu=False,
        reduction=reduction,
        name=name,
    )

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 155-157
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    return self.alpha + 1.0
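
A minimal usage sketch; the one-hot labels and logits are illustrative, and the hyperparameters are the documented defaults:

import tensorflow as tf
from deel.lipdp.losses import DP_MulticlassHKR

# batch of 2 samples, 3 classes; y_true must be one-hot encoded
y_true = tf.constant([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
logits = tf.constant([[2.0, 0.1, -1.0], [0.5, 0.2, 1.5]])

loss_fn = DP_MulticlassHKR(alpha=10.0, min_margin=1.0)
print(float(loss_fn(y_true, logits)))
print(loss_fn.get_L())  # alpha + 1.0 == 11.0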

DP_MulticlassHinge

Bases: MulticlassHinge, DP_Loss

Source code in deel/lipdp/losses.py, lines 160-189
class DP_MulticlassHinge(MulticlassHinge, DP_Loss):
    def __init__(
        self,
        min_margin=1.0,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassHinge",
    ):
        """
        Loss to estimate the Hinge loss in a multiclass setup. It computes the
        element-wise Hinge term. Note that this formulation differs from the one
        commonly found in tensorflow/pytorch (which maximises the difference between
        the two largest logits). This formulation is consistent with the binary
        classification loss used in a multiclass fashion.

        Note that `y_true` should be one-hot encoded or pre-processed with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Args:
            min_margin (float): margin to enforce.
            reduction: passed to tf.keras.Loss constructor
            name (str): passed to tf.keras.Loss constructor

        """
        super(DP_MulticlassHinge, self).__init__(
            min_margin=min_margin, reduction=reduction, name=name
        )

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        return 1.0

__init__(min_margin=1.0, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassHinge')

Loss to estimate the Hinge loss in a multiclass setup. It computes the element-wise Hinge term. Note that this formulation differs from the one commonly found in tensorflow/pytorch (which maximises the difference between the two largest logits). This formulation is consistent with the binary classification loss used in a multiclass fashion.

Note that y_true should be one-hot encoded or pre-processed with the deel.lip.utils.process_labels_for_multi_gpu() function.

Parameters:

    min_margin (float): margin to enforce. Default: 1.0
    reduction: passed to tf.keras.Loss constructor. Default: SUM_OVER_BATCH_SIZE
    name (str): passed to tf.keras.Loss constructor. Default: 'MulticlassHinge'
Source code in deel/lipdp/losses.py, lines 161-185
def __init__(
    self,
    min_margin=1.0,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="MulticlassHinge",
):
    """
    Loss to estimate the Hinge loss in a multiclass setup. It computes the
    element-wise Hinge term. Note that this formulation differs from the one
    commonly found in tensorflow/pytorch (which maximises the difference between
    the two largest logits). This formulation is consistent with the binary
    classification loss used in a multiclass fashion.

    Note that `y_true` should be one-hot encoded or pre-processed with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Args:
        min_margin (float): margin to enforce.
        reduction: passed to tf.keras.Loss constructor
        name (str): passed to tf.keras.Loss constructor

    """
    super(DP_MulticlassHinge, self).__init__(
        min_margin=min_margin, reduction=reduction, name=name
    )

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 187-189
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    return 1.0
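
A minimal usage sketch with illustrative one-hot labels and logits:

import tensorflow as tf
from deel.lipdp.losses import DP_MulticlassHinge

y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # one-hot labels
logits = tf.constant([[0.7, -0.2], [0.1, 0.4]])

loss_fn = DP_MulticlassHinge(min_margin=1.0)
print(float(loss_fn(y_true, logits)))
print(loss_fn.get_L())  # 1.0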

DP_MulticlassKR

Bases: MulticlassKR, DP_Loss

Source code in deel/lipdp/losses.py, lines 192-220
class DP_MulticlassKR(MulticlassKR, DP_Loss):
    def __init__(
        self,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassKR",
    ):
        r"""
        Loss to estimate average of Wasserstein-1 distance using Kantorovich-Rubinstein
        duality over outputs. In this multiclass setup, the KR term is computed for each
        class and then averaged.

        Note that `y_true` should be one-hot encoded or pre-processed with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Using a multi-GPU/TPU strategy requires setting `multi_gpu` to True and
        pre-processing the labels `y_true` with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Args:
            multi_gpu (bool): set to True when running on multi-GPU/TPU
            reduction: passed to tf.keras.Loss constructor
            name (str): passed to tf.keras.Loss constructor

        """
        super(DP_MulticlassKR, self).__init__(reduction=reduction, name=name)

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        return 1.0

__init__(reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassKR')

Loss to estimate average of Wasserstein-1 distance using Kantorovich-Rubinstein duality over outputs. In this multiclass setup, the KR term is computed for each class and then averaged.

Note that y_true should be one-hot encoded or pre-processed with the deel.lip.utils.process_labels_for_multi_gpu() function.

Using a multi-GPU/TPU strategy requires setting multi_gpu to True and pre-processing the labels y_true with the deel.lip.utils.process_labels_for_multi_gpu() function.

Parameters:

    multi_gpu (bool): set to True when running on multi-GPU/TPU (not exposed by this DP constructor).
    reduction: passed to tf.keras.Loss constructor. Default: SUM_OVER_BATCH_SIZE
    name (str): passed to tf.keras.Loss constructor. Default: 'MulticlassKR'
Source code in deel/lipdp/losses.py, lines 193-216
def __init__(
    self,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="MulticlassKR",
):
    r"""
    Loss to estimate average of Wasserstein-1 distance using Kantorovich-Rubinstein
    duality over outputs. In this multiclass setup, the KR term is computed for each
    class and then averaged.

    Note that `y_true` should be one-hot encoded or pre-processed with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Using a multi-GPU/TPU strategy requires setting `multi_gpu` to True and
    pre-processing the labels `y_true` with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Args:
        multi_gpu (bool): set to True when running on multi-GPU/TPU
        reduction: passed to tf.keras.Loss constructor
        name (str): passed to tf.keras.Loss constructor

    """
    super(DP_MulticlassKR, self).__init__(reduction=reduction, name=name)

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 218-220
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    return 1.0
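
A minimal usage sketch with illustrative one-hot labels and logits:

import tensorflow as tf
from deel.lipdp.losses import DP_MulticlassKR

y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # one-hot labels
logits = tf.constant([[0.7, -0.2], [0.1, 0.4]])

loss_fn = DP_MulticlassKR()
print(float(loss_fn(y_true, logits)))  # KR term averaged over classes
print(loss_fn.get_L())                 # 1.0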

DP_TauBCE

Bases: BinaryCrossentropy, DP_Loss

Source code in deel/lipdp/losses.py, lines 91-117
class DP_TauBCE(tf.keras.losses.BinaryCrossentropy, DP_Loss):
    def __init__(
        self,
        tau,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="TauBCE",
    ):
        """
        Similar to original binary crossentropy, but with a settable temperature
        parameter.

        Args:
            tau (float): temperature parameter.
            reduction: reduction of the loss, must be SUM_OVER_BATCH_SIZE in order to have correct accounting.
            name (str): name of the loss
        """
        super().__init__(from_logits=True, reduction=reduction, name=name)
        self.tau = tau

    def call(self, y_true, y_pred):
        y_pred = y_pred * self.tau
        return super().call(y_true, y_pred) / self.tau

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        # since the implementation divides the loss by self.tau (and it is used with from_logits=True)
        return 1.0

__init__(tau, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='TauBCE')

Similar to original binary crossentropy, but with a settable temperature parameter.

Parameters:

    tau (float): temperature parameter. Required.
    reduction: reduction of the loss; must be SUM_OVER_BATCH_SIZE in order to have correct accounting. Default: SUM_OVER_BATCH_SIZE
    name (str): name of the loss. Default: 'TauBCE'
Source code in deel/lipdp/losses.py, lines 92-108
def __init__(
    self,
    tau,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="TauBCE",
):
    """
    Similar to original binary crossentropy, but with a settable temperature
    parameter.

    Args:
        tau (float): temperature parameter.
        reduction: reduction of the loss, must be SUM_OVER_BATCH_SIZE in order to have correct accounting.
        name (str): name of the loss
    """
    super().__init__(from_logits=True, reduction=reduction, name=name)
    self.tau = tau

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 114-117
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    # since the implementation divides the loss by self.tau (and it is used with from_logits=True)
    return 1.0
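
A minimal usage sketch; the binary labels, logits, and tau value are illustrative. Note that the loss expects raw logits, since it is built with from_logits=True:

import tensorflow as tf
from deel.lipdp.losses import DP_TauBCE

y_true = tf.constant([[1.0], [0.0]])
logits = tf.constant([[2.0], [-1.0]])  # raw logits, not probabilities

loss_fn = DP_TauBCE(tau=8.0)           # illustrative temperature
print(float(loss_fn(y_true, logits)))  # BCE on tau * logits, divided by tau
print(loss_fn.get_L())                 # 1.0, thanks to the division by tau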

DP_TauCategoricalCrossentropy

Bases: TauCategoricalCrossentropy, DP_Loss

Source code in deel/lipdp/losses.py, lines 65-88
class DP_TauCategoricalCrossentropy(TauCategoricalCrossentropy, DP_Loss):
    def __init__(
        self,
        tau,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="TauCategoricalCrossentropy",
    ):
        """
        Similar to original categorical crossentropy, but with a settable temperature
        parameter.

        Args:
            tau (float): temperature parameter.
            reduction: reduction of the loss, must be SUM_OVER_BATCH_SIZE in order to have correct accounting.
            name (str): name of the loss
        """
        super(DP_TauCategoricalCrossentropy, self).__init__(
            tau=tau, reduction=reduction, name=name
        )

    def get_L(self):
        """Lipschitz constant of the loss wrt the logits."""
        # since the implementation divides the loss by self.tau (and it is used with from_logits=True)
        return math.sqrt(2)

__init__(tau, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='TauCategoricalCrossentropy')

Similar to original categorical crossentropy, but with a settable temperature parameter.

Parameters:

    tau (float): temperature parameter. Required.
    reduction: reduction of the loss; must be SUM_OVER_BATCH_SIZE in order to have correct accounting. Default: SUM_OVER_BATCH_SIZE
    name (str): name of the loss. Default: 'TauCategoricalCrossentropy'
Source code in deel/lipdp/losses.py, lines 66-83
def __init__(
    self,
    tau,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="TauCategoricalCrossentropy",
):
    """
    Similar to original categorical crossentropy, but with a settable temperature
    parameter.

    Args:
        tau (float): temperature parameter.
        reduction: reduction of the loss, must be SUM_OVER_BATCH_SIZE in order to have correct accounting.
        name (str): name of the loss
    """
    super(DP_TauCategoricalCrossentropy, self).__init__(
        tau=tau, reduction=reduction, name=name
    )

get_L()

Lipschitz constant of the loss wrt the logits.

Source code in deel/lipdp/losses.py, lines 85-88
def get_L(self):
    """Lipschitz constant of the loss wrt the logits."""
    # since the implementation divides the loss by self.tau (and it is used with from_logits=True)
    return math.sqrt(2)
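
A minimal usage sketch; the one-hot labels, logits, and tau value are illustrative:

import math
import tensorflow as tf
from deel.lipdp.losses import DP_TauCategoricalCrossentropy

y_true = tf.constant([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])   # one-hot labels
logits = tf.constant([[2.0, 0.1, -1.0], [0.5, 0.2, 1.5]])  # raw logits

loss_fn = DP_TauCategoricalCrossentropy(tau=8.0)
print(float(loss_fn(y_true, logits)))
print(loss_fn.get_L() == math.sqrt(2))  # True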