| python_code | repo_name | file_path |
|---|---|---|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/cc_admin.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/verifier.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/rim/__init__.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/rim/golden_measurement.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/__init__.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/gpu_cert_chains.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/test_handle.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/nvmlHandlerTest.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/utils/__init__.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/exceptions/__init__.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/exceptions/utils.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/attestation/spdm_msrt_req_msg.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/attestation/__init__.py |
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions o... | nvtrust-main | guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/attestation/spdm_msrt_resp_msg.py |
nvtrust-main | guest_tools/attestation_sdk/__init__.py | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
import nv_attestation_sdk
from nv_attestation_sdk import attestation
## testing
client = attestation.Attestation("inital-name")
print("Expecting initial-name")
print("node name :", client.get_... | nvtrust-main | guest_tools/attestation_sdk/tests/AttestationTest.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation
client = attestation.Attestation()
client.set_name("thisNode1")
print ("[SmallGPUTest] node name :", client.get_name())
client.add_verifier(attestati... | nvtrust-main | guest_tools/attestation_sdk/tests/SmallGPUTest.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation
client = attestation.Attestation("thisNode44")
print("node name :", client.get_name())
client.add_verifier(attestation.Devices.GPU, attestation.Envi... | nvtrust-main | guest_tools/attestation_sdk/tests/SmallCombinedTest.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
import sys
import os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import src.nv_attestation_sdk.attestation as attestation
# from nv_attestation_sdk im... | nvtrust-main | guest_tools/attestation_sdk/tests/Test1.py |
nvtrust-main | guest_tools/attestation_sdk/tests/__init__.py | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation
client = attestation.Attestation("have a nice day")
print("node name :", client.get_name())
client.add_verifier(attestation.Devices.CPU, attestation... | nvtrust-main | guest_tools/attestation_sdk/tests/SmallFauxTest.py |
nvtrust-main | guest_tools/attestation_sdk/src/__init__.py | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from enum import IntFlag
from enum import IntEnum
from datetime import datetime
from nv_attestation_sdk.gpu import attest_gpu
import secrets
import jwt
import json
class Devices(IntFlag):
... | nvtrust-main | guest_tools/attestation_sdk/src/nv_attestation_sdk/attestation.py |
nvtrust-main | guest_tools/attestation_sdk/src/nv_attestation_sdk/gpu/__init__.py | |
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
import json
import jwt
from verifier import cc_admin
def validate_gpu_token(gpu_token: str, policy: str):
if policy == "" or gpu_token == "":
return False
policy_obj = json.loads(policy)
gpu_token_obj = jwt.decode(gpu_... | nvtrust-main | guest_tools/attestation_sdk/src/nv_attestation_sdk/gpu/attest_gpu.py |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distri... | nvtrust-main | host_tools/python/gpu_cc_tool.py |
import numpy as np
import matplotlib.pyplot as plt
lattice = np.loadtxt("final.txt", dtype=np.int32)
plt.imshow(lattice)
plt.title('Final Lattice Configuration')
plt.colorbar()
plt.show()
| ising-gpu-master | basic_cuda/plot_ising.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, c... | ising-gpu-master | basic_python/ising_basic.py |
import glob
import matplotlib.pyplot as plt
import numpy as np
files = sorted(glob.glob("final_rank*.txt"))
if len(files) == 0:
raise Exception("Could not find any lattice files. Expecting files named 'final_rank*.txt' for processing")
lattice = np.loadtxt(files[0], dtype=np.int32)
for i,f in enumerate(files):... | ising-gpu-master | basic_python/plot_ising_multi.py |
import numpy as np
import matplotlib.pyplot as plt
lattice = np.loadtxt("final.txt", dtype=np.int32)
plt.imshow(lattice)
plt.title('Final Lattice Configuration')
plt.colorbar()
plt.show()
| ising-gpu-master | tensorcore/plot_ising.py |
#!/usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt
data = []
f=open(sys.argv[1])
for l in f:
    data.append([int(c) for c in l.strip(" \n\r")])
print(len(data), 'x', len(data[0]))
plt.imshow(data, interpolation='nearest')
outFile = sys.argv[1]+".png"
plt.savefig(outFile)
| ising-gpu-master | optimized/plotLattice.py |
import sys
import warnings
import os
import glob
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import (
BuildExtension,
CppExtension,
CUDAExtension,
CUDA_HOME,
load,
)
# ninja build does not w... | apex-master | setup.py |
import logging
import warnings
# May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten
import torch
__all__ = ["amp", "fp16_utils", "optimizers", "normalization", "transformer"]
if torch.distributed.is_available():
from . import parallel
... | apex-master | apex/__init__.py |
from typing import Optional, Sequence
import torch
__all__ = ["_cast_if_autocast_enabled"]
def _get_autocast_dtypes() -> Sequence[torch.dtype]:
if torch.cuda.is_bf16_supported():
return [torch.half, torch.bfloat16]
return [torch.half]
def _get_current_dtype(dtype: Optional[torch.dtype] = None) ->... | apex-master | apex/_autocast_utils.py |
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
import syncbn
from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction
class SyncBatchNorm(_BatchNorm):
"""
    synchronized batch normalization module extended from `torch.nn.BatchNormNd`
with the a... | apex-master | apex/parallel/optimized_sync_batchnorm.py |
import torch
from torch.autograd.function import Function
from apex.parallel import ReduceOp
class SyncBatchnormFunction(Function):
@staticmethod
def forward(ctx, input, weight, bias, running_mean, running_variance, eps, process_group, world_size):
torch.cuda.nvtx.range_push("sync_BN_fw")
# ... | apex-master | apex/parallel/sync_batchnorm_kernel.py |
import torch
if hasattr(torch.distributed, 'ReduceOp'):
ReduceOp = torch.distributed.ReduceOp
elif hasattr(torch.distributed, 'reduce_op'):
ReduceOp = torch.distributed.reduce_op
else:
ReduceOp = torch.distributed.deprecated.reduce_op
from .distributed import DistributedDataParallel, Reducer
# This is tri... | apex-master | apex/parallel/__init__.py |
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from .sync_batchnorm_kernel import SyncBatchnormFunction
from apex.parallel import ReduceOp
class SyncBatchNorm(_BatchNorm):
"""
    synchronized batch normalization module extended from ``torch.nn.BatchNormNd``
... | apex-master | apex/parallel/sync_batchnorm.py |
from collections import OrderedDict
import copy
import importlib
from itertools import chain
import torch
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
from ..multi_tensor_apply import multi_tensor_applier
imported_flatten_impl = False
def import_flatten_im... | apex-master | apex/parallel/distributed.py |
import torch
from torch.autograd.function import Function
import syncbn
from apex.parallel import ReduceOp
class SyncBatchnormFunction(Function):
@staticmethod
def forward(ctx, input, z, weight, bias, running_mean, running_variance, eps, track_running_stats = True, momentum = 1.0, process_group = None, chann... | apex-master | apex/parallel/optimized_sync_batchnorm_kernel.py |
import torch
from torch import nn
from torch.nn.parameter import Parameter
class LARC(object):
"""
:class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC,
in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive
local learning r... | apex-master | apex/parallel/LARC.py |
import torch
import sys
import subprocess
def docstring_hack():
"""
Multiproc file which will launch a set of processes locally for multi-gpu
usage: python -m apex.parallel.multiproc main.py ...
"""
pass
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in arg... | apex-master | apex/parallel/multiproc.py |
import importlib
import numbers
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
from apex._autocast_utils import _cast_if_autocast_enabled
global fused_layer_norm_cuda
fused_layer_norm_cuda = None
# Reference implementation from Huggingface
def m... | apex-master | apex/normalization/fused_layer_norm.py |
from .fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm, FusedRMSNorm, MixedFusedRMSNorm
| apex-master | apex/normalization/__init__.py |
from .fused_dense import *
| apex-master | apex/fused_dense/__init__.py |
import torch
from torch import nn
import fused_dense_cuda
from apex._autocast_utils import _cast_if_autocast_enabled
#implements fused GEMM+bias in forward pass using mlp_cuda from apex
class FusedDenseFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias):
ctx.save_for_back... | apex-master | apex/fused_dense/fused_dense.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/enums.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/parallel_state.py |
import logging
import os
def get_transformer_logger(name: str) -> logging.Logger:
name_wo_ext = os.path.splitext(name)[0]
return logging.getLogger(name_wo_ext)
def set_logging_level(verbosity) -> None:
"""Change logging severity.
Args:
verbosity
"""
from apex import _library_root_lo... | apex-master | apex/transformer/log_util.py |
from apex.transformer import amp
from apex.transformer import functional
from apex.transformer import parallel_state
from apex.transformer import pipeline_parallel
from apex.transformer import tensor_parallel
from apex.transformer import utils
from apex.transformer.enums import LayerType
from apex.transformer.enums imp... | apex-master | apex/transformer/__init__.py |
from torch import distributed as dist
HAS_UCC = hasattr(dist, "is_ucc_available") and dist.is_ucc_available()
if not HAS_UCC:
try:
import torch_ucc
HAS_UCC = True
except ImportError:
HAS_UCC = False
| apex-master | apex/transformer/_ucc_util.py |
"""Utility functions used by both `pipeline_parallel` and `tensor_parallel`"""
import torch
from apex.transformer import parallel_state
# `all_gather_into_tensor` is the new placeholder for `_all_gather_base`.
# It requires the most recent version of PyTorch.
# The following 4 lines are for backward compatibility with... | apex-master | apex/transformer/utils.py |
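The backward-compatibility note in the row above hints at a common shim: prefer the renamed collective when present, fall back to the legacy private name otherwise. The sketch below is an illustrative reconstruction under that assumption, not the exact apex code:

```python
import torch.distributed as dist

# Prefer the new collective name (PyTorch >= 1.13); fall back to the
# legacy private name on older builds that predate the rename.
if hasattr(dist, "all_gather_into_tensor"):
    all_gather_into_tensor = dist.all_gather_into_tensor
else:
    all_gather_into_tensor = dist._all_gather_base
```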
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/microbatches.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/tensor_parallel/cross_entropy.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r... | apex-master | apex/transformer/tensor_parallel/memory.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | apex-master | apex/transformer/tensor_parallel/__init__.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless... | apex-master | apex/transformer/tensor_parallel/random.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/tensor_parallel/utils.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless... | apex-master | apex/transformer/tensor_parallel/layers.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/tensor_parallel/data.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless... | apex-master | apex/transformer/tensor_parallel/mappings.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
from apex.transformer.layers.layer_norm import FastLayerNorm
from apex.transformer.layers.layer_norm import FusedLayerNorm
from apex.transformer.layers.layer_norm import MixedFusedLayerNorm
__all__ = [
"FastLayerNorm",
"FusedLayerNorm",
"Mixed... | apex-master | apex/transformer/layers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# NOTE(mkozuki): This file defines two LayerNorms that are compatible with Megatron-LM,
# while avoiding introducing the breaking change of `"sequence_parallel_enabled"` attribute into apex.normalization.FusedLayerNorm
# and apex.contrib.layer_norm.FastLaye... | apex-master | apex/transformer/layers/layer_norm.py |
import time
import torch
class _Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, "timer has already be... | apex-master | apex/transformer/pipeline_parallel/_timers.py |
from apex.transformer.pipeline_parallel.schedules import get_forward_backward_func
from apex.transformer.pipeline_parallel.schedules.common import build_model
__all__ = [
"get_forward_backward_func",
"build_model",
]
| apex-master | apex/transformer/pipeline_parallel/__init__.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/pipeline_parallel/utils.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless... | apex-master | apex/transformer/pipeline_parallel/p2p_communication.py |
import contextlib
from typing import Any, List, Optional, Sequence, Union
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.p2p_communication imp... | apex-master | apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_without_interleaving.py |
import contextlib
from typing import List, Union, Optional
import torch
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_pa... | apex-master | apex/transformer/pipeline_parallel/schedules/fwd_bwd_no_pipelining.py |
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
forward_backward_no_pipelining,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleav... | apex-master | apex/transformer/pipeline_parallel/schedules/__init__.py |
from typing import Any, Callable, Dict, List, Tuple, Union, Optional, Sequence
import torch
from torch.autograd.variable import Variable
from apex.normalization.fused_layer_norm import FusedLayerNorm
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipelin... | apex-master | apex/transformer/pipeline_parallel/schedules/common.py |
import contextlib
from typing import Any, Callable, List, Optional, Sequence, Union
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipe... | apex-master | apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_with_interleaving.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/testing/arguments.py |
apex-master | apex/transformer/testing/__init__.py | |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/testing/commons.py |
import contextlib
import torch
from apex.transformer import tensor_parallel
from apex.transformer.enums import AttnMaskType
from apex.transformer.enums import ModelType
from apex.transformer.layers import FusedLayerNorm as LayerNorm
from apex.transformer.testing.global_vars import get_args
from apex.transformer.testi... | apex-master | apex/transformer/testing/standalone_bert.py |
import os
import sys
import unittest
from packaging.version import Version, parse
import torch
from torch import distributed as dist
from torch.utils import collect_env
from torch.testing._internal import common_utils
from torch.testing._internal import common_distributed
from apex.transformer._ucc_util import HAS_UC... | apex-master | apex/transformer/testing/distributed_test_base.py |
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ap... | apex-master | apex/transformer/testing/standalone_gpt.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless... | apex-master | apex/transformer/testing/standalone_transformer_lm.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/testing/global_vars.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | apex-master | apex/transformer/amp/grad_scaler.py |
from apex.transformer.amp.grad_scaler import GradScaler
__all__ = [
"GradScaler",
]
| apex-master | apex/transformer/amp/__init__.py |
from apex.transformer._data._batchsampler import MegatronPretrainingRandomSampler
from apex.transformer._data._batchsampler import MegatronPretrainingSampler
__all__ = [
"MegatronPretrainingRandomSampler",
"MegatronPretrainingSampler",
]
| apex-master | apex/transformer/_data/__init__.py |
"""BatchSampler implementations for POC of dynamic batch size or rampup_batch_size support.
Implementations are based on https://github.com/NVIDIA/Megatron-LM/blob/bcd605f8570ebeeb0436c115ebbfafc3c5a40ae5/megatron/data/data_samplers.py.
""" # NOQA
import abc
import torch
__all__ = [
"MegatronPretrainingSampler... | apex-master | apex/transformer/_data/_batchsampler.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | apex-master | apex/transformer/functional/fused_softmax.py |
from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
__all__ = [
"FusedScaleMaskSoftmax",
]
| apex-master | apex/transformer/functional/__init__.py |
from .fp16util import (
BN_convert_float,
network_to_half,
prep_param_lists,
model_grads_to_master_grads,
master_params_to_model_params,
tofp16,
to_python_float,
clip_grad_norm,
convert_module,
convert_network,
FP16Model,
)
from .fp16_optimizer import FP16_Optimizer
from .lo... | apex-master | apex/fp16_utils/__init__.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
... | apex-master | apex/fp16_utils/fp16util.py |
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ..amp._amp_state import _amp_state, maybe_print
from ..amp.scaler import LossScaler
from ..multi_tensor_apply import multi_tensor... | apex-master | apex/fp16_utils/fp16_optimizer.py |
import torch
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class LossScaler:
"""
Class that manages a static loss scale. This class is intended to interact with
:class:`FP1... | apex-master | apex/fp16_utils/loss_scaler.py |
from .multi_tensor_apply import MultiTensorApply
multi_tensor_applier = MultiTensorApply(2048*32)
| apex-master | apex/multi_tensor_apply/__init__.py |
import torch
class MultiTensorApply(object):
available = False
warned = False
def __init__(self, chunk_size):
try:
import amp_C
MultiTensorApply.available = True
self.chunk_size = chunk_size
except ImportError as err:
MultiTensorApply.availab... | apex-master | apex/multi_tensor_apply/multi_tensor_apply.py |
apex-master | apex/contrib/__init__.py | |
import torch
import fused_index_mul_2d
class IndexMul2d_(torch.autograd.Function):
'''
    Currently only supports indexing in dimension 0 with a 2-dimensional tensor.
    The shape of the indexed in1 must match that of in2; this kernel does not support broadcasting.
The datatype must be float32 or float16.
'''
... | apex-master | apex/contrib/index_mul_2d/index_mul_2d.py |
from .index_mul_2d import index_mul_2d
| apex-master | apex/contrib/index_mul_2d/__init__.py |
from .sparse_masklib import create_mask
from .asp import ASP
| apex-master | apex/contrib/sparsity/__init__.py |
import types
import torch
from .sparse_masklib import create_mask
from .permutation_lib import Permutation
torchvision_imported=True
try:
import torchvision
except ImportError:
print("[ASP][Warning] torchvision cannot be imported.")
torchvision_imported=False
import json
import os
import string
import tim... | apex-master | apex/contrib/sparsity/asp.py |
import os
import torch
import json
import string
import time
import numpy as np
import sys
import builtins as __builtin__
import io
try:
from .permutation_search_kernels import accelerated_search_for_good_permutation, sum_after_2_to_4
print("[ASP][Info] permutation_search_kernels can be imported.")
except Impor... | apex-master | apex/contrib/sparsity/permutation_lib.py |
import sys
import torch
import numpy as np
import collections
from itertools import permutations
""" compute density (helper fn to compute % NNZs in a tensor) """
def fill(x):
return float(x.nonzero().size(0))/torch.numel(x)
""" reshape matrix into m-dimensional vectors: (h,w) -> (hw/m, m) """
def reshape_1d(mat... | apex-master | apex/contrib/sparsity/sparse_masklib.py |
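The `(h,w) -> (hw/m, m)` reshape described in the docstring above can be sketched as follows. This assumes the element count divides evenly by `m`; it is not necessarily the exact apex implementation, which may pad ragged widths:

```python
import torch

# Sketch of the (h, w) -> (h*w/m, m) reshape from the docstring above.
# Assumes h*w is divisible by m; the real helper may handle padding.
def reshape_1d_sketch(mat: torch.Tensor, m: int) -> torch.Tensor:
    h, w = mat.shape
    assert (h * w) % m == 0, "element count must be divisible by m"
    return mat.reshape(h * w // m, m)
```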
from .permutation_utilities import *
################################################################################################################
# Exhaustive
# Try them all
# - order of columns within a group doesn't matter
# - order of groups doesn't matter
# - we can eliminate effective duplicates by de... | apex-master | apex/contrib/sparsity/permutation_search_kernels/exhaustive_search.py |
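The rows above all follow the three-column schema declared in the header (`python_code`, `repo_name`, `file_path`). A minimal sketch of consuming such a table, assuming it has been exported to a CSV file with a header row (the filename `python_code_dataset.csv` is hypothetical):

```python
import csv
from collections import Counter

# Count how many source files each repository contributes, using the
# three-column schema above. The CSV path and header row are assumptions.
with open("python_code_dataset.csv", newline="", encoding="utf-8") as fh:
    rows = csv.DictReader(fh)  # expects columns: python_code, repo_name, file_path
    per_repo = Counter(row["repo_name"].strip() for row in rows)

for repo, count in per_repo.most_common():
    print(f"{repo}: {count} files")
```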