Mirror of https://github.com/davesarmoury/GLaDOS.git, synced 2025-09-26 22:31:26 +08:00
42 lines · 1.7 KiB · Python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytorch_lightning as pl

from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import FastPitchModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager

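# The @hydra_runner decorator below reads conf/fastpitch_align_44100.yaml
# (relative to this script) and passes the resulting config to main() as
# `cfg`, with any command-line overrides applied. A minimal sketch of how the
# script might be launched; the script name, checkpoint path, and override
# values are illustrative assumptions, not part of the original file:
#
#   python fastpitch_finetune.py \
#       model.optim.lr=2e-4 \
#       trainer.max_epochs=1000 \
#       +init_from_nemo_model=/path/to/pretrained_fastpitch.nemo
#
# `+init_from_nemo_model` is picked up by maybe_init_from_pretrained_checkpoint()
# below to warm-start the model before finetuning.
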
@hydra_runner(config_path="conf", config_name="fastpitch_align_44100")
def main(cfg):
    # Finetuning is normally done with a fixed learning rate, so flag a
    # configured scheduler or an out-of-range learning rate.
    if hasattr(cfg.model.optim, 'sched'):
        logging.warning("You are using an optimizer scheduler while finetuning. Are you sure this is intended?")
    if cfg.model.optim.lr > 1e-3 or cfg.model.optim.lr < 1e-5:
        logging.warning("The recommended learning rate for finetuning is 2e-4")

    # Build the Lightning trainer from the config and attach the experiment
    # manager, which handles logging and checkpointing.
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))

    # Construct FastPitch and, if the config points at a pretrained checkpoint
    # (e.g. init_from_nemo_model), initialize the weights from it.
    model = FastPitchModel(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)

    # Log the learning rate and per-epoch wall-clock time during training.
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])

    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
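
# A rough sketch, not part of the original script, of how the finetuned model
# might be loaded afterwards for inference. The checkpoint path is a
# placeholder; exp_manager decides where checkpoints are written.
#
#   from nemo.collections.tts.models import FastPitchModel
#
#   model = FastPitchModel.load_from_checkpoint("<exp_dir>/checkpoints/<name>-last.ckpt")
#   model.eval()
#   tokens = model.parse("Welcome back to the enrichment center.")
#   spectrogram = model.generate_spectrogram(tokens=tokens)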