# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run MindFormer."""
import argparse
import os

import numpy as np
from mindspore.common import set_seed

from mindformers.tools.register import MindFormerConfig, ActionDict
from mindformers.common.parallel_config import build_parallel_config
from mindformers.tools.utils import str2bool
from mindformers.common.context import build_context
from mindformers.trainer import build_trainer
from mindformers.common.callback import build_callback
from mindformers.tools.cloud_adapter import cloud_monitor


@cloud_monitor()
def main(config):
    """main."""
    # init context
    set_seed(config.seed)
    np.random.seed(config.seed)
    cfts, profile_cb = build_context(config)

    # build context config
    config.logger.info(".........Build context config..........")
    build_parallel_config(config)
    config.logger.info("context config is:{}".format(config.parallel_config))
    config.logger.info("moe config is:{}".format(config.moe_config))

    # auto pull dataset if on ModelArts platform
    if config.pretrain_dataset:
        config.pretrain_dataset.data_loader.dataset_dir = cfts.get_dataset(
            config.pretrain_dataset.data_loader.dataset_dir)
    if config.eval_dataset:
        config.eval_dataset.data_loader.dataset_dir = cfts.get_dataset(
            config.eval_dataset.data_loader.dataset_dir)

    # auto pull checkpoint if on ModelArts platform
    if config.runner_config.load_checkpoint:
        config.runner_config.load_checkpoint = cfts.get_checkpoint(config.runner_config.load_checkpoint)

    # define callback
    callbacks = []
    if config.profile:
        callbacks.append(profile_cb)
    callbacks.extend(build_callback(config.callbacks))
    config.callbacks = callbacks

    trainer = build_trainer(config.trainer)
    if config.run_status == 'train':
        trainer.train(config)
    elif config.run_status == 'eval':
        trainer.evaluate(config)
    elif config.run_status == 'predict':
        trainer.predict(config)


if __name__ == "__main__":
    work_path = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        default=os.path.join(
            work_path, "configs/mae/run_mae_vit_base_p16_224_400ep.yaml"),
        help='YAML config files')
    parser.add_argument('--mode', default=None, type=int, help='context mode')
    parser.add_argument('--device_id', default=None, type=int, help='device id')
    parser.add_argument('--device_target', default=None, type=str, help='device target')
    parser.add_argument('--run_status', default=None, type=str, help='run status: train, eval or predict')
    parser.add_argument('--dataset_dir', default=None, type=str, help='dataset directory')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='load model checkpoint')
    parser.add_argument('--seed', default=None, type=int, help='random seed')
    parser.add_argument('--use_parallel', default=None, type=str2bool, help='whether to use parallel mode')
    parser.add_argument('--profile', default=None, type=str2bool, help='whether to enable profiling analysis')
    parser.add_argument(
        '--options',
        nargs='+',
        action=ActionDict,
        help='override some settings in the used config; key-value pairs '
             'in xxx=yyy format will be merged into the config file')
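    # A hedged example of an '--options' override (valid keys depend on the YAML
    # layout; 'seed' is a top-level key this script itself reads):
    #   --options seed=42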
    args_ = parser.parse_args()

    # load the YAML config and override it with any command line options
    config_ = MindFormerConfig(args_.config)
    if args_.device_id is not None:
        config_.context.device_id = args_.device_id
    if args_.device_target is not None:
        config_.context.device_target = args_.device_target
    if args_.mode is not None:
        config_.context.mode = args_.mode
    if args_.run_status is not None:
        config_.run_status = args_.run_status
    if args_.seed is not None:
        config_.seed = args_.seed
    if args_.use_parallel is not None:
        config_.use_parallel = args_.use_parallel
    if args_.checkpoint_path is not None:
        config_.checkpoint_path = args_.checkpoint_path
    if args_.profile is not None:
        config_.profile = args_.profile
    if args_.options is not None:
        config_.merge_from_dict(args_.options)
    assert config_.run_status in ['train', 'eval', 'predict'], \
        f"run status must be in ['train', 'eval', 'predict'], but got {config_.run_status}"
    main(config_)