Merged
54 changes: 52 additions & 2 deletions official/vision/beta/projects/mesh_rcnn/configs/mesh_rcnn.py
@@ -17,6 +17,12 @@

from official.vision.configs import common
from official.modeling import hyperparams # type: ignore
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.beta.projects.mesh_rcnn import optimization
from official.vision.beta.projects.mesh_rcnn.tasks import mesh_rcnn
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import backbones
from official.core import config_definitions as cfg
@@ -94,10 +100,54 @@ class MeshLosses(hyperparams.Config):
edge_weight: float = 0.1
true_num_samples: int = 5000
pred_num_samples: int = 5000

@dataclasses.dataclass
class MeshRCNN(hyperparams.Config):
backbone: backbones.Backbone = backbones.Backbone(
type='resnet', resnet=backbones.ResNet())
decoder: decoders.Decoder = decoders.Decoder(
type='fpn', fpn=decoders.FPN())
type='fpn', fpn=decoders.FPN())

@exp_factory.register_config_factory('mesh_training')
def mesh_training() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv3 and v4."""
train_batch_size = 256
eval_batch_size = 8
train_epochs = 300
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
validation_interval = 5

max_num_instances = 200
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
train_steps=train_epochs * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'adam',
'adam': {
'learning_rate' : 0.001,
'beta_1' : 0.9,
'beta_2' : 0.999,
'epsilon' : 1e-07
}
},
'learning_rate': {},
'warmup': {}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])

return config
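
For reference (this is not part of the diff), an experiment registered this way is normally looked up through the Model Garden experiment factory. A minimal sketch, assuming the mesh_rcnn project modules above are importable:

from official.core import exp_factory
# Importing the configs module runs the register_config_factory decorator,
# which makes the 'mesh_training' experiment available by name.
from official.vision.beta.projects.mesh_rcnn.configs import mesh_rcnn  # noqa: F401

config = exp_factory.get_exp_config('mesh_training')
print(config.trainer.optimizer_config.optimizer.type)  # -> 'adam'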

22 changes: 22 additions & 0 deletions official/vision/beta/projects/mesh_rcnn/optimization/__init__.py
@@ -0,0 +1,22 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimization package definition."""

# pylint: disable=wildcard-import
from official.modeling.optimization.configs.learning_rate_config import *
from official.modeling.optimization.ema_optimizer import ExponentialMovingAverage
from official.vision.beta.projects.mesh_rcnn.optimization.configs.optimization_config import *
from official.vision.beta.projects.mesh_rcnn.optimization.configs.optimizer_config import *
from official.vision.beta.projects.mesh_rcnn.optimization.optimizer_factory import OptimizerFactory as MeshOptimizerFactory
@@ -0,0 +1,14 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@@ -0,0 +1,56 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dataclasses for optimization configs.

This file defines the dataclass for optimization configs (OptimizationConfig).
It also has two helper functions, get_optimizer_config and get_lr_config, that
retrieve the corresponding configs from an OptimizationConfig instance.
"""
import dataclasses
from typing import Optional

from official.modeling.optimization.configs import optimization_config as optimization_cfg
from official.vision.beta.projects.mesh_rcnn.optimization.configs import optimizer_config as opt_cfg


@dataclasses.dataclass
class OptimizerConfig(optimization_cfg.OptimizerConfig):
"""Configuration for optimizer.

Attributes:
type: 'str', type of optimizer to be used, one of the fields below.
sgd: sgd optimizer config.
adam: adam optimizer config.
adamw: adam with weight decay.
lamb: lamb optimizer.
rmsprop: rmsprop optimizer.
sgd_torch: PyTorch-style sgd optimizer config (see SGDTorchConfig).
"""
type: Optional[str] = None
sgd_torch: opt_cfg.SGDTorchConfig = opt_cfg.SGDTorchConfig()


@dataclasses.dataclass
class OptimizationConfig(optimization_cfg.OptimizationConfig):
"""Configuration for optimizer and learning rate schedule.

Attributes:
optimizer: optimizer oneof config.
ema: optional exponential moving average optimizer config; if specified, the
EMA optimizer will be used.
learning_rate: learning rate oneof config.
warmup: warmup oneof config.
"""
type: Optional[str] = None
optimizer: OptimizerConfig = OptimizerConfig()
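
As an illustrative sketch (not from the PR), the extended OptimizationConfig accepts the same nested-dict construction as the upstream class, with the new 'sgd_torch' type now selectable; the field values below are placeholders:

from official.vision.beta.projects.mesh_rcnn import optimization

opt_config = optimization.OptimizationConfig({
    'optimizer': {
        'type': 'sgd_torch',
        'sgd_torch': {'momentum': 0.9, 'weight_decay': 0.0005}
    },
    'learning_rate': {
        'type': 'stepwise',
        'stepwise': {'boundaries': [10000, 20000],
                     'values': [0.1, 0.01, 0.001]}
    },
    'warmup': {
        'type': 'linear',
        'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
    }
})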
@@ -0,0 +1,63 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dataclasses for optimizer configs."""
import dataclasses
from typing import List, Optional

from official.modeling.hyperparams import base_config
from official.modeling.optimization.configs import optimizer_config


@dataclasses.dataclass
class BaseOptimizerConfig(base_config.Config):
"""Base optimizer config.

Attributes:
clipnorm: float >= 0 or None. If not None, gradients will be clipped when
their L2 norm exceeds this value.
clipvalue: float >= 0 or None. If not None, gradients will be clipped when
their absolute value exceeds this value.
global_clipnorm: float >= 0 or None. If not None, gradients of all weights
are clipped so that their global norm is no higher than this value.
"""
clipnorm: Optional[float] = None
clipvalue: Optional[float] = None
global_clipnorm: Optional[float] = None


@dataclasses.dataclass
class SGDTorchConfig(optimizer_config.BaseOptimizerConfig):
"""Configuration for SGD optimizer.

The attributes of this class match the arguments of tf.keras.optimizers.SGD.

Attributes:
name: name of the optimizer.
decay: decay rate for SGD optimizer.
nesterov: nesterov for SGD optimizer.
momentum_start: momentum starting point for SGD optimizer.
momentum: momentum for SGD optimizer.
warmup_steps: number of steps over which momentum is warmed up from
momentum_start to momentum.
weight_decay: weight decay coefficient.
weight_keys: substrings of variable names treated as weights for weight decay.
bias_keys: substrings of variable names treated as biases.
"""
name: str = "SGD"
decay: float = 0.0
nesterov: bool = False
momentum_start: float = 0.0
momentum: float = 0.9
warmup_steps: int = 0
weight_decay: float = 0.0
weight_keys: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["kernel", "weight"])
bias_keys: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["bias", "beta"])
@@ -0,0 +1,99 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimizer factory class."""

import gin

from official.modeling.optimization import ema_optimizer
from official.modeling.optimization import optimizer_factory
from official.vision.beta.projects.mesh_rcnn.optimization import sgd_torch

optimizer_factory.OPTIMIZERS_CLS.update({
'sgd_torch': sgd_torch.SGDTorch,
})

OPTIMIZERS_CLS = optimizer_factory.OPTIMIZERS_CLS
LR_CLS = optimizer_factory.LR_CLS
WARMUP_CLS = optimizer_factory.WARMUP_CLS


class OptimizerFactory(optimizer_factory.OptimizerFactory):
"""Optimizer factory class.

This class builds learning rate and optimizer based on an optimization config.
To use this class, you need to do the following:
(1) Define an optimization config; this includes the optimizer and the learning
rate schedule.
(2) Initialize the class using the optimization config.
(3) Build learning rate.
(4) Build optimizer.

This is a typical example for using this class:
params = {
'optimizer': {
'type': 'sgd',
'sgd': {'momentum': 0.9}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {'boundaries': [10000, 20000],
'values': [0.1, 0.01, 0.001]}
},
'warmup': {
'type': 'linear',
'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
}
}
opt_config = OptimizationConfig(params)
opt_factory = OptimizerFactory(opt_config)
lr = opt_factory.build_learning_rate()
optimizer = opt_factory.build_optimizer(lr)
"""

def get_bias_lr_schedule(self, bias_lr):
"""Build learning rate.

Builds learning rate from config. Learning rate schedule is built according
to the learning rate config. If learning rate type is consant,
lr_config.learning_rate is returned.

Args:
bias_lr: bias learning rate, used as the starting warmup learning rate for the bias schedule.

Returns:
tf.keras.optimizers.schedules.LearningRateSchedule instance. If the
learning rate type is constant, lr_config.learning_rate is returned.
"""
if self._lr_type == 'constant':
lr = self._lr_config.learning_rate
else:
lr = LR_CLS[self._lr_type](**self._lr_config.as_dict())

if self._warmup_config:
if self._warmup_type != 'linear':
raise ValueError('Smart Bias is currently only supported with a '
'linear warm up.')
warm_up_cfg = self._warmup_config.as_dict()
warm_up_cfg['warmup_learning_rate'] = bias_lr
lr = WARMUP_CLS['linear'](lr, **warm_up_cfg)
return lr

@gin.configurable
def add_ema(self, optimizer):
"""Add EMA to the optimizer independently of the build optimizer method."""
if self._use_ema:
optimizer = ema_optimizer.ExponentialMovingAverage(
optimizer, **self._ema_config.as_dict())
return optimizer