code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from datetime import datetime as dt import os from github import Github a : Tuple = [ """good first issue""", """good second issue""", """good difficult issue""", """feature request""", """new model""", """wip""", ] def __lowerCamelCase ( ...
338
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a : Dict = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , *A , **A ) -> ...
338
1
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil a : Union[str, Any] = 1_0_0 a : Dict = set(range(3, NUM_PRIMES, 2)) primes.add(2) a : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime ...
338
'''simple docstring''' import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a : Union[str, An...
338
1
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __lowerCamelCase ( ) -> List[str]: raise RuntimeError("""CUDA out of memory.""" ) ...
338
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConf...
338
1
'''simple docstring''' import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def __lowerCamelCase ( _lowercase ) -> str: UpperCAmelCase : Optional[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) UpperCAmelCase : List[st...
338
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase ) -> Li...
338
1
'''simple docstring''' from __future__ import annotations import time import numpy as np a : Optional[Any] = [8, 5, 9, 7] a : List[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] a : Any = [ [3, 2, 1, 4], ...
338
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : ...
338
1
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .s...
338
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging i...
338
1
'''simple docstring''' from __future__ import annotations def __lowerCamelCase ( _lowercase , _lowercase ) -> list[int]: UpperCAmelCase : Dict = 0 UpperCAmelCase : Union[str, Any] = len(_lowercase ) - 1 while i < j: i...
338
'''simple docstring''' # Function to print upper half of diamond (pyramid) def __lowerCamelCase ( _lowercase ) -> List[Any]: for i in range(0 , _lowercase ): for _ in range(0 , n - i - 1 ): # printing spaces print(""" """ , end="...
338
1
'''simple docstring''' from math import factorial def __lowerCamelCase ( _lowercase = 2_0 ) -> int: UpperCAmelCase : List[Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCAmelCase : Any =...
338
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever a : List[str] = logging.getLogger(__name__) class UpperCamelCase_ ( __magic_name__...
338
1
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_...
338
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_log...
338
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Any = { """configuration_blip_2""": [ """BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Blip2Config""", """Blip2QFormerConfi...
338
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: ...
338
1
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #...
338
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configu...
338
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase_ ( __magic_name__ ): lowercase = ['image_processor', 'tokenizer'] lowercase = 'CLIPImageProcessor' lowerca...
338
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: UpperCAmelCase : Tuple = len(_lowercase ) + 1 UpperCAmelCase : List[Any] = len(_lowercase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefi...
338
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Dict = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]} tr...
338
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase : List[str] = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def __lowerCamelCase ( _lowercase = 1_0_0 ) -> int...
338
1
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> bool: if not isinstance(_lowercase , _lowercase ): UpperCAmelCase : List[str] = F'''Input value of [number={number}] must be an integer''' raise TypeError(_lowercase ) ...
338
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampl...
338
1
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch...
338
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[Any] = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100On...
338
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConf...
338
'''simple docstring''' from math import loga def __lowerCamelCase ( _lowercase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_lowercase , _lowercase ): raise TypeError("""Input value must be...
338
1
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a : str = { """sample_size""": 3_2, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2...
338
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. a : Optional[int] = 1_0 def __lowerCamelCase ( _lowercase , _lowercase , ...
338
1
'''simple docstring''' import argparse a : Optional[int] = """docs/source/_static/js/custom.js""" def __lowerCamelCase ( _lowercase ) -> Tuple: with open(_lowercase , encoding="""utf-8""" , newline="""\n""" ) as f: UpperCAmelCase : List[...
338
'''simple docstring''' import numpy as np class UpperCamelCase_ : def __init__( self ) -> int: UpperCAmelCase : str = (0, 0) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Any = 0 UpperCAmelC...
338
1
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> list[int]: UpperCAmelCase : List[str] = int(_lowercase ) # Initialize Result UpperCAmelCase : Tuple = [] # Traverse through all denomination for...
338
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: ...
338
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor a : int = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , *A , **A ) -> Non...
338
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extrac...
338
1
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: ...
338
'''simple docstring''' a : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def __lowerCamelCase ( ) -> None: UpperCAmelCase : Optional[int] = input("""Enter message: """ ) UpperCAmelCase : Dict = input("""Enter key [alphanumeric]: """ ) ...
338
1
'''simple docstring''' class UpperCamelCase_ : def __init__( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = name UpperCAmelCase : Any = value UpperCAmelCase : Dict = weight ...
338
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( """split_dict""" , [ SplitDict(), SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_3_3_7 , num_e...
338
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a : Optional[Any] = logging.get_logger(__name__) a : List[str] = { """m...
338
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a : Dict = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , *A , **A ) -> ...
338
1
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPip...
338
'''simple docstring''' import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a : Union[str, An...
338
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): fr...
338
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConf...
338
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, pr...
338
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase ) -> Li...
338
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase_ ( __magic_name__ ...
338
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : ...
338
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a : Optional[int] = { """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], ...
338
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging i...
338
1
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation...
338
'''simple docstring''' # Function to print upper half of diamond (pyramid) def __lowerCamelCase ( _lowercase ) -> List[Any]: for i in range(0 , _lowercase ): for _ in range(0 , n - i - 1 ): # printing spaces print(""" """ , end="...
338
1
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0 , _lowercase = 1_0 ) -> int: UpperCAmelCase : defaultdict = defaultdict(_lowercase ) for outer_width in range(3 , ...
338
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever a : List[str] = logging.getLogger(__name__) class UpperCamelCase_ ( __magic_name__...
338
1
'''simple docstring''' import numpy as np class UpperCamelCase_ : def __init__( self ) -> int: UpperCAmelCase : str = (0, 0) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Any = 0 UpperCAmelC...
338
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_log...
338
1
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> str: return "\n".join( F'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_ter...
350
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: ...
338
0
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers....
351
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configu...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Any = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): ...
352
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: UpperCAmelCase : Tuple = len(_lowercase ) + 1 UpperCAmelCase : List[Any] = len(_lowercase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefi...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0 ) -> int: UpperCAmelCase : Any = set(range(3 , __lowerCAmelCase , 2 ) ) primes.add(2 ) for p in range(3 , __lowerCAmelCase , 2 ): if p not in...
353
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase : List[str] = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def __lowerCamelCase ( _lowercase = 1_0_0 ) -> int...
338
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor...
354
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampl...
338
0
from typing import Any def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[int]: _validation( __a , __a , __a , __a , __a , ) # Creates data structures and...
355
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[Any] = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100On...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a : Tuple = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig...
356
'''simple docstring''' from math import loga def __lowerCamelCase ( _lowercase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_lowercase , _lowercase ): raise TypeError("""Input value must be...
338
0
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: UpperCA...
357
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. a : Optional[int] = 1_0 def __lowerCamelCase ( _lowercase , _lowercase , ...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> int: return int((input_a, input_a).count(0 ) == 0 ) def __lowerCamelCase ( ) -> List[Any]: assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 ass...
358
'''simple docstring''' import numpy as np class UpperCamelCase_ : def __init__( self ) -> int: UpperCAmelCase : str = (0, 0) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Any = 0 UpperCAmelC...
338
0
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel f...
359
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: ...
338
0
'''simple docstring''' from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCamelCase_ : def _lowercase( self , A ) -> Dict: raise NotImplementedError() def _lowercase( ...
360
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extrac...
338
0
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar a : Any = TypeVar("""T""") class UpperCamelCase_ ( Generic[T] ): lowercase = 42 # Cache store of keys lowercase = 42 ...
361
'''simple docstring''' a : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def __lowerCamelCase ( ) -> None: UpperCAmelCase : Optional[int] = input("""Enter message: """ ) UpperCAmelCase : Dict = input("""Enter key [alphanumeric]: """ ) ...
338
0
'''simple docstring''' from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class UpperCamelCase_ ( __magic_name__ ): lowercase = 'philschmid/bart-large-cnn-samsum' lowercase = ( 'This is a tool that summarizes an English ...
362
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( """split_dict""" , [ SplitDict(), SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_3_3_7 , num_e...
338
0
import warnings from functools import wraps from typing import Callable def __lowerCamelCase ( _lowercase ) -> Callable: @wraps(_lowerCamelCase ) def _inner_fn(*_lowercase , **_lowercase ): warnings.warn( (F'''\'{fn.__name__}\' is experimenta...
363
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a : Dict = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , *A , **A ) -> ...
338
0
'''simple docstring''' import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from t...
364
'''simple docstring''' import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a : Union[str, An...
338
0
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttent...
365
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConf...
338
0
'''simple docstring''' class UpperCamelCase_ : def __init__( self , A , A , A ) -> Any: UpperCAmelCase : List[Any] = name UpperCAmelCase : Any = value UpperCAmelCase : int = weight def __repr__( se...
366
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase ) -> Li...
338
0
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( ...
367
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : ...
338
0
'''simple docstring''' from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer a : Optional[Any] = logging.get_logger(__name__) a : Tuple = {'vocab_fi...
368
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging i...
338
0
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def __lowerCamelCase ( _lowercase ) -> List[Any]: return (data["d...
369
'''simple docstring''' # Function to print upper half of diamond (pyramid) def __lowerCamelCase ( _lowercase ) -> List[Any]: for i in range(0 , _lowercase ): for _ in range(0 , n - i - 1 ): # printing spaces print(""" """ , end="...
338
0
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputW...
370
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever a : List[str] = logging.getLogger(__name__) class UpperCamelCase_ ( __magic_name__...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailabl...
371
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_log...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a : Dict = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: ...
350
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: ...
338
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, lo...
351
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configu...
338
0
'''simple docstring''' from __future__ import annotations import math from collections.abc import Callable def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase = 1_0_0 , ) -> float: UpperCAmelCase : Optional[int] = x_start...
352
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: UpperCAmelCase : Tuple = len(_lowercase ) + 1 UpperCAmelCase : List[Any] = len(_lowercase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefi...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase = 2_0_0 ) -> Any: UpperCAmelCase : Tuple = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0] UpperCAmelCase : List[str] = [0] * (pence + 1) UpperCAmelCase : Tuple = 1 # base case:...
353
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase : List[str] = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def __lowerCamelCase ( _lowercase = 1_0_0 ) -> int...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Valid...
354
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampl...
338
0
a : Dict = { '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''', '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''', ...
355
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[Any] = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100On...
338
0
'''simple docstring''' import math class UpperCamelCase_ : def __init__( self , A=0 ) -> Any: # a graph with Node 0,1,...,N-1 UpperCAmelCase : List[str] = n UpperCAmelCase : Any = [ [math.inf for j in range(0 , A )]...
356
'''simple docstring''' from math import loga def __lowerCamelCase ( _lowercase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_lowercase , _lowercase ): raise TypeError("""Input value must be...
338
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[int] = logging.get_logger(__name__) a : List[str] = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics4...
357
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. a : Optional[int] = 1_0 def __lowerCamelCase ( _lowercase , _lowercase , ...
338
0
'''simple docstring''' from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from tran...
358
'''simple docstring''' import numpy as np class UpperCamelCase_ : def __init__( self ) -> int: UpperCAmelCase : str = (0, 0) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Any = 0 UpperCAmelC...
338
0
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import Accel...
359
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: ...
338
0
'''simple docstring''' from PIL import Image def __lowerCamelCase ( _lowercase ) -> Image: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = image.size UpperCAmelCase : Tuple = 0 UpperCAmelCase : Optional[int] = im...
360
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extrac...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a : Optional[Any] = {} try: if not is_sentencepiece_available(): ...
361
'''simple docstring''' a : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def __lowerCamelCase ( ) -> None: UpperCAmelCase : Optional[int] = input("""Enter message: """ ) UpperCAmelCase : Dict = input("""Enter key [alphanumeric]: """ ) ...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> float: return base * power(lowercase_ , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print("""Raise base to the power of exponent using recursion...""") a = int(in...
362
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( """split_dict""" , [ SplitDict(), SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_3_3_7 , num_e...
338
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torc...
363
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a : Dict = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , *A , **A ) -> ...
338
0
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #...
364
'''simple docstring''' import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a : Union[str, An...
338
0
'''simple docstring''' from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = { "nielsr/canine-s": 2_0_4_8, } # Unicode defines 1,114,112 total “codepoints” a = 1_1_1_4_1_1_...
365
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConf...
338
0
'''simple docstring''' import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class UpperCamelCase_ ( lowerCamelCase_ ): ...
366
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase ) -> Li...
338
0
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import Ba...
367
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : ...
338
0
'''simple docstring''' import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class UpperCamelCase_ ( A_ ...
368
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging i...
338
0
from __future__ import annotations def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: if len(_snake_case ) == 0: return False UpperCAmelCase : Dict = len(_snake_case ) // 2 if a_list[midpoint] == item: return True ...
369
'''simple docstring''' # Function to print upper half of diamond (pyramid) def __lowerCamelCase ( _lowercase ) -> List[Any]: for i in range(0 , _lowercase ): for _ in range(0 , n - i - 1 ): # printing spaces print(""" """ , end="...
338
0
import os import numpy import onnx def __lowerCamelCase ( _lowercase , _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = a.name UpperCAmelCase : Optional[int] = b.name UpperCAmelCase : Union[str, Any] = "" ...
370
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever a : List[str] = logging.getLogger(__name__) class UpperCamelCase_ ( __magic_name__...
338
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase_ ( unittest.TestC...
371
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_log...
338
0
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
350
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: ...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : int = { """configuration_bigbird_pegasus""": [ """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BigBirdPegasusConfig""", ...
351
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configu...
338
0
'''simple docstring''' class UpperCamelCase_ : def __init__( self , A ) -> List[Any]: UpperCAmelCase : Any = val UpperCAmelCase : Optional[int] = None UpperCAmelCase : int = None def _lowercase( s...
352
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: UpperCAmelCase : Tuple = len(_lowercase ) + 1 UpperCAmelCase : List[Any] = len(_lowercase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefi...
338
0
'''simple docstring''' import os def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Any = len(grid[0] ) UpperCAmelCase : List[str] = len(lowerCamelCase__ ) UpperCAmelCase : List[str] = 0 UpperCAmel...
353
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase : List[str] = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def __lowerCamelCase ( _lowercase = 1_0_0 ) -> int...
338
0
'''simple docstring''' import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGener...
354
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampl...
338
0
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to ...
355
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[Any] = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100On...
338
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTIma...
356
'''simple docstring''' from math import loga def __lowerCamelCase ( _lowercase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_lowercase , _lowercase ): raise TypeError("""Input value must be...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase ) -> Dict: return "".join([hex(_lowercase )[2:].zfill(2 ).upper() for byte in list(_lowercase )] ) def __lowerCamelCase ( _lowercase ) -> Dict: if (len(_lowercase ) % 2) != 0: raise ValueE...
357
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. a : Optional[int] = 1_0 def __lowerCamelCase ( _lowercase , _lowercase , ...
338
0
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokeniz...
358
'''simple docstring''' import numpy as np class UpperCamelCase_ : def __init__( self ) -> int: UpperCAmelCase : str = (0, 0) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Any = 0 UpperCAmelC...
338
0
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge a : List[str] = [ 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw...
359
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a : Optional[int] = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: ...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a : List[str] = {} try: if not is_sentencepiece_available(): ...
360
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extrac...
338
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils im...
361
'''simple docstring''' a : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def __lowerCamelCase ( ) -> None: UpperCAmelCase : Optional[int] = input("""Enter message: """ ) UpperCAmelCase : Dict = input("""Enter key [alphanumeric]: """ ) ...
338
0
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class UpperCamelCase_ ( nn.Module ): lowercase = 42 lowercase = jnp.floataa def _lowercase( self ) -> List[Any]: UpperCAmelCase : Union[str, Any] ...
362
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( """split_dict""" , [ SplitDict(), SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_3_3_7 , num_e...
338
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : int = { 'configuration_clipseg': [ 'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig', ...
363
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a : Dict = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , *A , **A ) -> ...
338
0
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase_ : def __init__( self , A ) -> None: UpperCAmelCase : str = data # Initialize hash values UpperCAmelCase : Optional[int] = [ ...
364
'''simple docstring''' import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a : Union[str, An...
338
0
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import ...
365
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConf...
338
0
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: UpperCAmelCase : str = len(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase : Dict = len(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase : Dict = [[False f...
366
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase ) -> Li...
338
0
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __lowerCamelCase ( ) -> List[str]: UpperCAmelCase : str = { """repo_name""": ["""test_repo1""",...
367
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : ...
338
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common...
368
'''simple docstring''' import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging i...
338
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_N...
369
'''simple docstring''' # Function to print upper half of diamond (pyramid) def __lowerCamelCase ( _lowercase ) -> List[Any]: for i in range(0 , _lowercase ): for _ in range(0 , n - i - 1 ): # printing spaces print(""" """ , end="...
338
0
from math import log from scipy.constants import Boltzmann, physical_constants a : Optional[Any] = 3_0_0 # TEMPERATURE (unit = K) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , ) -> float: if donor_conc <= 0: raise ValueErro...
370
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever a : List[str] = logging.getLogger(__name__) class UpperCamelCase_ ( __magic_name__...
338
0
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a : Dict = logging.get_logger(__name__) a : Tuple = ...
371
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer a : List[Any] = logging.get_log...
338
0
'''simple docstring''' from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin...
350
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: ...
338
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a : str = { """configuration_mobilebert""": [ """MOBILEBERT_PRETRAINED_CON...
351
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configu...
338
0
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging a : int = loggin...
352
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> bool: UpperCAmelCase : Tuple = len(_lowercase ) + 1 UpperCAmelCase : List[Any] = len(_lowercase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefi...
338
0