6 changes: 3 additions & 3 deletions federatedscope/contrib/optimizer/example.py
@@ -1,16 +1,16 @@
from federatedscope.register import register_optimizer


def call_my_optimizer(model, type, lr, **kwargs):
def call_my_optimizer(type):
try:
import torch.optim as optim
except ImportError:
optim = None
optimizer = None

if type == 'myoptimizer':
if type.lower() == 'myoptimizer':
if optim is not None:
optimizer = optim.Adam(model.parameters(), lr=lr, **kwargs)
optimizer = optim.Adam
return optimizer


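With this change a registered hook receives only the type string and returns an optimizer class; get_optimizer() instantiates it afterwards with model.parameters(), the learning rate and the remaining config kwargs. A minimal sketch of a contrib optimizer under the new contract; the 'myrmsprop' name is illustrative, and register_optimizer is assumed to take a (name, hook) pair like register_regularizer elsewhere in this PR:

import torch.optim as optim

from federatedscope.register import register_optimizer


def call_my_optimizer(type):
    # Return the optimizer class only; get_optimizer() instantiates it
    # later with model.parameters(), lr and the remaining config kwargs.
    if type.lower() == 'myrmsprop':
        return optim.RMSprop


# Assumed to mirror register_regularizer's (name, hook) signature.
register_optimizer('myrmsprop', call_my_optimizer)
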
6 changes: 6 additions & 0 deletions federatedscope/core/auxiliaries/model_builder.py
@@ -149,6 +149,12 @@ def get_model(model_config, local_data=None, backend='torch'):
elif model_config.type.lower() in ['vmfnet', 'hmfnet']:
from federatedscope.mf.model.model_builder import get_mfnet
model = get_mfnet(model_config, input_shape)
elif model_config.type.lower() == 'fmlinearregression':
from federatedscope.differential_privacy.model.fm_linear_regression import FMLinearRegression
model = FMLinearRegression(in_channels=input_shape[-1], epsilon=0.5)
elif model_config.type.lower() == 'fmlogisticregression':
from federatedscope.differential_privacy.model.fm_logistic_regression import FMLogisticRegression
model = FMLogisticRegression(in_channels=input_shape[-1], epsilon=0.5)
else:
raise ValueError('Model {} is not provided'.format(model_config.type))

15 changes: 8 additions & 7 deletions federatedscope/core/auxiliaries/optimizer_builder.py
@@ -11,9 +11,10 @@

try:
from federatedscope.contrib.optimizer import *
from federatedscope.differential_privacy.optimizers import *
except ImportError as error:
logger.warning(
f'{error} in `federatedscope.contrib.optimizer`, some modules are not '
f'{error} in `federatedscope.contrib.optimizer` or `federatedscope.differential_privacy.optimizers`, some modules are not '
f'available.')


@@ -28,12 +29,6 @@ def get_optimizer(model, type, lr, **kwargs):
del tmp_kwargs['__cfg_check_funcs__']
if 'is_ready_for_run' in tmp_kwargs:
del tmp_kwargs['is_ready_for_run']

for func in register.optimizer_dict.values():
optimizer = func(model, type, lr, **tmp_kwargs)
if optimizer is not None:
return optimizer

if isinstance(type, str):
if hasattr(torch.optim, type):
if isinstance(model, torch.nn.Module):
Expand All @@ -42,6 +37,12 @@ def get_optimizer(model, type, lr, **kwargs):
else:
return getattr(torch.optim, type)(model, lr, **tmp_kwargs)
else:
# registered optimizers
for func in register.optimizer_dict.values():
optimizer = func(type)
if optimizer is not None:
return optimizer(model.parameters(), lr, **tmp_kwargs)

raise NotImplementedError(
'Optimizer {} not implement'.format(type))
else:
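
With this reordering, names found in torch.optim are resolved first and the registered hooks act as a fallback for custom types. A rough usage sketch (the Linear model is only for illustration):

import torch

from federatedscope.core.auxiliaries.optimizer_builder import get_optimizer

model = torch.nn.Linear(10, 1)

# Resolved directly from torch.optim.
sgd = get_optimizer(model, 'SGD', lr=0.01)

# A custom name falls through to the registered hooks, e.g. (assuming
# dp_optimizer registers the DP classes under their type names):
# dp_sgd = get_optimizer(model, 'DPGaussianSGD', lr=0.01,
#                        l2_norm_sensitivity=0.1, noise_multiplier=0.1)
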
2 changes: 1 addition & 1 deletion federatedscope/core/auxiliaries/regularizer_builder.py
@@ -1,5 +1,5 @@
from federatedscope.register import regularizer_dict
from federatedscope.core.regularizer.proximal_regularizer import *
from federatedscope.core.regularizer import *
try:
from torch.nn import Module
except ImportError:
3 changes: 3 additions & 0 deletions federatedscope/core/auxiliaries/trainer_builder.py
@@ -25,6 +25,7 @@
"fedvattrainer": "FedVATTrainer",
"fedfocaltrainer": "FedFocalTrainer",
"mftrainer": "MFTrainer",
"fmtrainer": "FMTrainer"
}


@@ -80,6 +81,8 @@ def get_trainer(model=None,
dict_path = "federatedscope.gfl.flitplus.trainer"
elif config.trainer.type.lower() in ['mftrainer']:
dict_path = "federatedscope.mf.trainer.trainer"
elif config.trainer.type.lower() == 'fmtrainer':
dict_path = "federatedscope.differential_privacy.trainers.fmtrainer"
else:
raise ValueError

5 changes: 4 additions & 1 deletion federatedscope/core/regularizer/__init__.py
@@ -1 +1,4 @@
from federatedscope.core.regularizer.proximal_regularizer import *
from federatedscope.core.regularizer.proximal_regularizer import ProximalRegularizer
from federatedscope.core.regularizer.l2_regularizer import L2Regularizer

__all__ = ['ProximalRegularizer', 'L2Regularizer']
33 changes: 33 additions & 0 deletions federatedscope/core/regularizer/l2_regularizer.py
@@ -0,0 +1,33 @@
from federatedscope.register import register_regularizer
from torch.nn import Module

import torch

REGULARIZER_NAME = "l2_regularizer"


class L2Regularizer(Module):
"""Returns the l2 norm of weight

Returns:
Tensor: the l2 norm of the given update.
"""
def __init__(self):
super(L2Regularizer, self).__init__()

def forward(self, ctx, skip_bn=False):
l2_norm = 0.
for name, param in ctx.model.named_parameters():
if skip_bn and 'bn' in name:
continue
l2_norm += torch.sum(param**2)
return l2_norm


def call_l2_regularizer(type):
if type == REGULARIZER_NAME:
regularizer = L2Regularizer
return regularizer


register_regularizer(REGULARIZER_NAME, call_l2_regularizer)
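
A rough sketch of how the registered regularizer is applied; the ToyCtx container below is only an illustrative stand-in for the trainer context, which exposes ctx.model:

import torch

from federatedscope.core.regularizer import L2Regularizer


class ToyCtx:  # illustrative stand-in for the trainer context
    def __init__(self, model):
        self.model = model


ctx = ToyCtx(torch.nn.Linear(4, 1))
mu = 0.01  # regularizer.mu from the config
penalty = mu * L2Regularizer()(ctx)  # mu times the sum of squared parameters
penalty_no_bn = L2Regularizer()(ctx, skip_bn=True)  # skips params with 'bn' in the name
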
Empty file.
@@ -0,0 +1,39 @@
use_gpu: True
device: 0
early_stop:
patience: 5
seed: 12345
federate:
mode: standalone
total_round_num: 300
sample_client_rate: 0.2
data:
root: data/
type: femnist
splits: [0.6,0.2,0.2]
batch_size: 10
subsample: 0.05
num_workers: 0
transform: [['ToTensor'], ['Normalize', {'mean': [0.1307], 'std': [0.3081]}]]
model:
type: convnet2
hidden: 2048
out_channels: 62
dropout: 0.0
train:
local_update_steps: 1
batch_or_epoch: epoch
optimizer:
type: DPGaussianSGD
lr: 0.01
l2_norm_sensitivity: 0.1
noise_multiplier: 0.1
grad:
grad_clip: 5.0
criterion:
type: CrossEntropyLoss
trainer:
type: cvtrainer
eval:
freq: 10
metrics: ['acc', 'correct']
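
The DPGaussianSGD and DPLaplaceSGD implementations are not shown in this diff; purely as a reading aid, a simplified Gaussian-mechanism SGD step driven by the l2_norm_sensitivity and noise_multiplier fields above conventionally looks like the sketch below. This is the textbook mechanism (with aggregate rather than per-sample clipping), not the PR's code:

import torch


def dp_gaussian_sgd_step(model, lr=0.01, l2_norm_sensitivity=0.1,
                         noise_multiplier=0.1):
    # Illustrative only: clip the gradient norm to the sensitivity bound,
    # add calibrated Gaussian noise, then take a plain SGD step.
    torch.nn.utils.clip_grad_norm_(model.parameters(), l2_norm_sensitivity)
    std = noise_multiplier * l2_norm_sensitivity
    with torch.no_grad():
        for p in model.parameters():
            if p.grad is None:
                continue
            p.grad.add_(torch.randn_like(p.grad) * std)
            p.add_(p.grad, alpha=-lr)
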
@@ -0,0 +1,27 @@
use_gpu: True
federate:
mode: 'standalone'
total_round_num: 500
client_num: 10
seed: 12345
trainer:
type: 'FMTrainer'
train:
local_update_steps: 10
batch_or_epoch: 'batch'
optimizer:
type: 'SGD'
lr: 0.01
eval:
freq: 20
metrics: ['loss_regular']
count_flops: False
model:
type: 'FMLinearRegression'
data:
type: 'toy'
criterion:
type: MSELoss
regularizer:
type: 'l2_regularizer'
mu: 0.01
Empty file.
27 changes: 27 additions & 0 deletions federatedscope/differential_privacy/composition/compositor.py
@@ -0,0 +1,27 @@

import numpy as np

# Privacy-budget composition, maintained per client
class AdvancedComposition(object):
pass


class PrivacyAccountantComposition(object):
pass


class RenyiComposition(object):
def __init__(self, sample_rate):
self.orders = [1.5, 1.75, 2, 2.5, 3, 4, 5, 6, 8, 16, 32, 64]

self.budgets = np.zeros_like(self.orders)

self.epsilon = 0

# sampling rate
# alpha rate

def compose(self, scale):
for i, order in enumerate(self.orders):
epsilon = order / scale ** 2
self.budgets[i] += epsilon
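
compose() accumulates a per-order Renyi-DP budget but never converts it; the usual way to report a single (epsilon, delta) guarantee from such budgets is epsilon = min over alpha of (eps_RDP(alpha) + log(1/delta) / (alpha - 1)). A hedged sketch of that standard conversion, which is not part of this PR:

import numpy as np


def rdp_to_dp(orders, rdp_budgets, delta=1e-5):
    # Standard conversion from accumulated RDP budgets to (epsilon, delta)-DP.
    orders = np.asarray(orders, dtype=float)
    rdp_budgets = np.asarray(rdp_budgets, dtype=float)
    eps = rdp_budgets + np.log(1.0 / delta) / (orders - 1.0)
    return float(np.min(eps))
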
4 changes: 4 additions & 0 deletions federatedscope/differential_privacy/model/__init__.py
@@ -0,0 +1,4 @@
from federatedscope.differential_privacy.model.fm_linear_regression import FMLinearRegression
from federatedscope.differential_privacy.model.fm_logistic_regression import FMLogisticRegression

__all__ = ['FMLinearRegression', 'FMLogisticRegression']
52 changes: 52 additions & 0 deletions federatedscope/differential_privacy/model/fm_linear_regression.py
@@ -0,0 +1,52 @@
from torch.nn import Parameter
from torch.nn import Module
from torch.nn.init import kaiming_normal_

import numpy as np

import torch
import math


class FMLinearRegression(Module):
"""Implementation of Functional Mechanism for linear regression refer to
`Functional Mechanism: Regression Analysis under Differential Privacy`
[Jun Wang, et al.](https://arxiv.org/abs/1208.0219)

Args:
in_channels (int): the number of dimensions
epsilon (float): the privacy budget (epsilon) for differential privacy

Note:
The forward function returns the average loss directly, so no
separate criterion function is needed for FM linear regression.
"""
def __init__(self, in_channels, epsilon):
super(FMLinearRegression, self).__init__()
self.w = Parameter(torch.empty(in_channels, 1))
kaiming_normal_(self.w, a=math.sqrt(5))

sensitivity = float(2*(1+2*in_channels+in_channels**2))

self.laplace = torch.distributions.laplace.Laplace(loc=0, scale=sensitivity / epsilon * np.sqrt(2))

def forward(self, x, y):
# J=0
lambda0 = torch.matmul(y.t(), y)
lambda0 += self.laplace.sample(sample_shape=lambda0.size()).to(lambda0.device)
# J=1
lambda1 = -2 * torch.matmul(y.t(), x)
lambda1 += self.laplace.sample(sample_shape=lambda1.size()).to(lambda1.device)
# J=2
lambda2 = torch.matmul(x.t(), x)
lambda2 += self.laplace.sample(sample_shape=lambda2.size()).to(lambda2.device)
w2 = torch.matmul(self.w, self.w.t())

loss_total = lambda0 + torch.sum(lambda1.t() * self.w) + torch.sum(lambda2*w2)

pred = torch.matmul(x, self.w)

return pred, loss_total / x.size(0)
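
Without the Laplace perturbation these three blocks reproduce the ordinary squared-error objective, since sum_i (y_i - x_i^T w)^2 = y^T y - 2 y^T X w + w^T X^T X w; the noise is injected into the polynomial coefficients rather than the gradients. A small sanity check of that algebra (illustrative, independent of the PR code):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(8, 3))
y = rng.normal(size=(8, 1))
w = rng.normal(size=(3, 1))

direct = np.sum((y - X @ w) ** 2)

lam0 = y.T @ y        # matches lambda0 before noise
lam1 = -2 * y.T @ X   # matches lambda1 before noise
lam2 = X.T @ X        # matches lambda2 before noise
expanded = lam0 + lam1 @ w + w.T @ lam2 @ w

assert np.allclose(direct, expanded)
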



@@ -0,0 +1,49 @@
from torch.nn import Parameter
from torch.nn import Module
from torch.nn.init import kaiming_normal_

import numpy as np

import torch


class FMLogisticRegression(Module):
"""Implementation of Functional Mechanism for logistic regression refer to
`Functional Mechanism: Regression Analysis under Differential Privacy`
[Jun Wang, et al.](https://arxiv.org/abs/1208.0219)

Args:
in_channels (int): the number of dimensions
epsilon (float): the privacy budget (epsilon) for differential privacy

Note:
The forward function returns the average loss directly, so no
separate criterion function is needed for FM logistic regression.
"""
def __init__(self, in_channels, epsilon):
super(FMLogisticRegression, self).__init__()
self.w = Parameter(torch.empty(in_channels, 1))
kaiming_normal_(self.w)

sensitivity = 0.25 * in_channels ** 2 + 3 * in_channels
self.laplace = torch.distributions.laplace.Laplace(loc=0, scale=sensitivity / epsilon * np.sqrt(2))

def forward(self, x, y):
if len(y.size()) == 1:
y = torch.unsqueeze(y, dim=-1)
# J=0
lambda0 = np.log(2)
lambda0 += self.laplace.sample(sample_shape=[1]).to(x.device)
# J=1
lambda1 = 0.5 * x - y * x
lambda1 += self.laplace.sample(sample_shape=lambda1.size()).to(lambda1.device)
# J=2
lambda2 = torch.matmul(x.t(), x)
lambda2 += self.laplace.sample(sample_shape=lambda2.size()).to(lambda2.device)
w2 = torch.matmul(self.w, self.w.t())

loss_total = lambda0 * x.size(0) + torch.sum(lambda1.t() * self.w) + 0.125 * torch.sum(lambda2 * w2)

pred = torch.matmul(x, self.w)

return pred, loss_total / x.size(0)
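
Here the coefficients come from the second-order Taylor expansion of the per-sample logistic loss log(1 + exp(x^T w)) - y * x^T w around w = 0, i.e. log 2 + (1/2 - y) * x^T w + (1/8) * (x^T w)^2, which is what lambda0, lambda1 and 0.125 * lambda2 perturb. A quick numerical check of that approximation (illustrative only):

import numpy as np

x = np.array([[0.3, -0.2, 0.1]])
w = np.array([[0.10], [0.05], [-0.02]])
y = 1.0

z = float(x @ w)
exact = np.log1p(np.exp(z)) - y * z
approx = np.log(2.0) + (0.5 - y) * z + 0.125 * z ** 2

assert abs(exact - approx) < 1e-6
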
3 changes: 3 additions & 0 deletions federatedscope/differential_privacy/optimizers/__init__.py
@@ -0,0 +1,3 @@
from federatedscope.differential_privacy.optimizers.dp_optimizer import DPGaussianSGD, DPLaplaceSGD

__all__ = ['DPGaussianSGD', 'DPLaplaceSGD']