Dataset columns:
python_code — string, lengths 0 to 992k characters
repo_name — string, lengths 8 to 46 characters
file_path — string, lengths 5 to 162 characters

Each record below shows a truncated python_code preview, followed by its repo_name and file_path.
import os import torch import collections import logging from tqdm import tqdm, trange import json import bs4 from os import path as osp from bs4 import BeautifulSoup as bs # from transformers.models.bert.tokenization_bert import BasicTokenizer, whitespace_tokenize from torch.utils.data import Dataset import networkx a...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/websrc/websrc.py
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss from transformers import BertConfig, BertModel, BertPreTrainedModel, RobertaConfig # from transformers.modeling_bert import BertLayerNorm, BertOnlyMLMHead from transformers.models.bert.modeling_bert import BertOnlyMLMHead BertLayerN...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/websrc/model.py
tags_dict = {'a': 0, 'abbr': 1, 'acronym': 2, 'address': 3, 'altGlyph': 4, 'altGlyphDef': 5, 'altGlyphItem': 6, 'animate': 7, 'animateColor': 8, 'animateMotion': 9, 'animateTransform': 10, 'applet': 11, 'area': 12, 'article': 13, 'aside': 14, 'audio': 15, 'b': 16, 'base': 17, 'basefont': 18, '...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/websrc/web_tag_utils.py
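The web_tag_utils.py preview above is a hand-written mapping from HTML/SVG tag names to integer ids. A minimal sketch of the same idea, built with enumerate over a tag list instead of spelled out literal by literal; the TAG_NAMES list here is truncated and the unknown-tag fallback is an assumption, not something visible in the preview:

# Hypothetical reconstruction: assign each known HTML/SVG tag a stable integer id.
TAG_NAMES = ['a', 'abbr', 'acronym', 'address', 'altGlyph', 'altGlyphDef',
             'altGlyphItem', 'animate', 'animateColor', 'animateMotion']  # truncated

tags_dict = {tag: i for i, tag in enumerate(TAG_NAMES)}

UNKNOWN_TAG_ID = len(tags_dict)  # assumed fallback id for tags outside the list

def tag_to_id(tag: str) -> int:
    return tags_dict.get(tag, UNKNOWN_TAG_ID)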
import os import sys sys.path.append(os.getcwd()) import torch import torch.nn as nn import shutil import logging import torch.distributed as dist from transformers import ( BertTokenizer, RobertaTokenizer ) from args import args from model import ( Layoutlmv1ForQuestionAnswering, Layoutlmv1Config,...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/websrc/run_websrc.py
import argparse parser = argparse.ArgumentParser() parser.add_argument("--exp_name", default='your_exp_name', type=str) parser.add_argument("--seed", default=42, type=int) parser.add_argument("--output_dir", default='.', type=str) parser.add_argument("--overwrite_output_dir", default=True) parser.add_argument("--mode...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/websrc/args.py
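args.py centralizes hyper-parameters in a module-level argparse parser that other scripts import (run_websrc.py does `from args import args`). A sketch restricted to the flags visible in the preview; everything after --mode is truncated in the source and left out here:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--exp_name", default='your_exp_name', type=str)
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--output_dir", default='.', type=str)
parser.add_argument("--overwrite_output_dir", default=True)
# ... remaining arguments are truncated in the preview ...

# Parsing at import time is what lets other modules do `from args import args`.
args = parser.parse_args()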
from genericpath import exists import os import torch.nn as nn import torch import logging from tqdm import tqdm, trange import timeit import collections import json import math from bs4 import BeautifulSoup from copy import deepcopy import string import re from torch.utils.tensorboard import SummaryWriter from torch...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/websrc/trainer.py
import collections import json import logging import os from typing import Optional, Tuple import numpy as np from tqdm.auto import tqdm logger = logging.getLogger(__name__) def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/squad/utils_qa.py
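utils_qa.py's postprocess_qa_predictions turns per-token start/end logits back into answer spans. A compact, self-contained sketch of the core span-selection step only; the real function additionally handles multiple features per example, offset mapping back into the context text, and SQuAD-v2 null answers, and the function name below is mine:

import numpy as np

def best_span(start_logits: np.ndarray, end_logits: np.ndarray,
              n_best: int = 20, max_answer_length: int = 30):
    """Pick the (start, end) token pair maximizing start_logit + end_logit."""
    start_idx = np.argsort(start_logits)[-n_best:][::-1]  # top-n_best starts
    end_idx = np.argsort(end_logits)[-n_best:][::-1]      # top-n_best ends
    best = None
    for s in start_idx:
        for e in end_idx:
            if e < s or e - s + 1 > max_answer_length:
                continue  # skip inverted or over-long spans
            score = start_logits[s] + end_logits[e]
            if best is None or score > best[0]:
                best = (score, int(s), int(e))
    return best  # (score, start_token, end_token), or None if no valid span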
import logging import os os.environ['DISABLE_MLFLOW_INTEGRATION'] = 'True' import sys from dataclasses import dataclass, field from typing import Optional import datasets from datasets import load_dataset, load_metric import transformers from trainer_qa import QuestionAnsweringTrainer from transformers import ( A...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/squad/run_squad.py
from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput if is_torch_tpu_available(): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class QuestionAnsweringTrainer(Trainer): def __init__(self, *args, eval_examples=None,...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/squad/trainer_qa.py
from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import Seq2SeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput if is_torch_tpu_available(): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met clas...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/squad/trainer_seq2seq_qa.py
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.funsd import transformers from layoutlmft.data import DataCollator...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/run_funsd.py
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss from transformers import BertConfig, BertModel, BertPreTrainedModel, RobertaConfig # from transformers.modeling_bert import BertLayerNorm, BertOnlyMLMHead logger = logging.getLogger(__name__) LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MA...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/model.py
import os import re import numpy as np from transformers.utils import logging logger = logging.get_logger(__name__) PREFIX_CHECKPOINT_DIR = "checkpoint" _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [ ...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/evaluation.py
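The evaluation.py preview cuts off inside get_last_checkpoint. A sketch completing the visible regex-based pattern (list the folder, keep subdirectories named checkpoint-<step>, return the one with the highest step); the body past the list comprehension is inferred, not visible in the preview:

import os
import re

PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")

def get_last_checkpoint(folder):
    content = os.listdir(folder)
    checkpoints = [
        path for path in content
        if _re_checkpoint.search(path) is not None
        and os.path.isdir(os.path.join(folder, path))
    ]
    if not checkpoints:
        return None
    # Highest step number wins, e.g. checkpoint-500 beats checkpoint-100.
    return os.path.join(
        folder,
        max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])),
    )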
from collections import OrderedDict from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_NAMES_MAPPING, TOKENIZER_MAPPING from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, BertConverter, XLMRobertaConverter from transformers.models.auto.modeling_auto import auto...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/__init__.py
from dataclasses import dataclass from typing import Dict, Optional, Tuple import torch from transformers.file_utils import ModelOutput @dataclass class ReOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = No...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/utils.py
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/__init__.py
from dataclasses import dataclass, field from typing import Optional @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier f...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/model_args.py
# coding=utf-8 from transformers.models.layoutlm.tokenization_layoutlm import LayoutLMTokenizer from transformers.utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/layoutlmv2-base-uncased":...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2.py
from .configuration_layoutlmv2 import LayoutLMv2Config from .modeling_layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/__init__.py
# -*- coding: utf-8 -*- def add_layoutlmv2_config(cfg): _C = cfg # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C.MODEL.MASK_ON = True # When using pre-trained m...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/detectron2_config.py
# coding=utf-8 import math import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss import detectron2 from detectron2.modeling import META_ARCH_REGISTRY from transformers import PreTrainedModel from transformers.modeling_outputs import ( ...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/modeling_layoutlmv2.py
# coding=utf-8 from transformers.models.layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast from transformers.utils import logging from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer....
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2_fast.py
# coding=utf-8 from transformers.models.layoutlm.configuration_layoutlm import LayoutLMConfig from transformers.utils import logging logger = logging.get_logger(__name__) LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/configuration_layoutlmv2.py
# coding=utf-8 from transformers.utils import logging from ..layoutlmv2 import LayoutLMv2Config logger = logging.get_logger(__name__) LAYOUTXLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/config.json", "layoutxlm-large": "https://huggingface.co/lay...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/configuration_layoutxlm.py
# coding=utf-8 from transformers import XLMRobertaTokenizerFast from transformers.file_utils import is_sentencepiece_available from transformers.utils import logging if is_sentencepiece_available(): from .tokenization_layoutxlm import LayoutXLMTokenizer else: LayoutXLMTokenizer = None logger = logging.get_l...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/tokenization_layoutxlm_fast.py
# coding=utf-8 from transformers import XLMRobertaTokenizer from transformers.utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "layoutxlm-base": "https://huggin...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/tokenization_layoutxlm.py
from .configuration_layoutxlm import LayoutXLMConfig from .modeling_layoutxlm import LayoutXLMForRelationExtraction, LayoutXLMForTokenClassification, LayoutXLMModel from .tokenization_layoutxlm import LayoutXLMTokenizer from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/__init__.py
# coding=utf-8 from transformers.utils import logging from ..layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model from .configuration_layoutxlm import LayoutXLMConfig logger = logging.get_logger(__name__) LAYOUTXLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "layoutxlm-base...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/modeling_layoutxlm.py
from transformers.models.layoutlm import *
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlm/__init__.py
import collections import time from typing import Any, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import nn from torch.utils.data import DataLoader, Dataset from transformers.trainer_utils import EvalPrediction, PredictionOutput, speed_metrics from transformers.utils impo...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/trainers/xfun_trainer.py
from .funsd_trainer import FunsdTrainer from .xfun_trainer import XfunReTrainer, XfunSerTrainer
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/trainers/__init__.py
from typing import Any, Dict, Union import torch from transformers import Trainer class FunsdTrainer(Trainer): def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: """ Prepare :obj:`inputs` before feeding them to the model, converting the...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/trainers/funsd_trainer.py
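FunsdTrainer overrides Trainer._prepare_inputs; its docstring says it prepares inputs before feeding them to the model. A sketch of the usual device-placement override, assuming that is all the method does here:

from typing import Any, Dict, Union

import torch
from transformers import Trainer

class FunsdTrainer(Trainer):
    def _prepare_inputs(
        self, inputs: Dict[str, Union[torch.Tensor, Any]]
    ) -> Dict[str, Union[torch.Tensor, Any]]:
        # Move every tensor-like value onto the training device; leave the rest alone.
        for k, v in inputs.items():
            if hasattr(v, "to"):
                inputs[k] = v.to(self.args.device)
        return inputs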
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/modules/__init__.py
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/modules/decoders/__init__.py
import copy import torch from torch import nn from torch.nn import CrossEntropyLoss class BiaffineAttention(torch.nn.Module): """Implements a biaffine attention operator for binary relation classification. PyTorch implementation of the biaffine attention operator from "End-to-end neural relation extract...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/modules/decoders/re.py
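The re.py decoder implements biaffine attention for binary relation classification (per its docstring, following the cited end-to-end relation extraction paper). A minimal sketch of that operator as commonly implemented: a bilinear term over the entity pair plus a linear term over their concatenation; treat this as an assumed reconstruction rather than the file's exact code:

import torch
from torch import nn

class BiaffineAttention(nn.Module):
    """Biaffine score of an entity pair: x1^T U x2 + W [x1; x2] + b."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.bilinear = nn.Bilinear(in_features, in_features, out_features, bias=False)
        self.linear = nn.Linear(2 * in_features, out_features, bias=True)

    def forward(self, x_1: torch.Tensor, x_2: torch.Tensor) -> torch.Tensor:
        return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1))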
# flake8: noqa from .data_collator import DataCollatorForKeyValueExtraction from .datasets import *
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/__init__.py
import torch from detectron2.data.detection_utils import read_image from detectron2.data.transforms import ResizeTransform, TransformList def normalize_bbox(bbox, size): return [ int(1000 * bbox[0] / size[0]), int(1000 * bbox[1] / size[1]), int(1000 * bbox[2] / size[0]), int(1000 ...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/utils.py
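normalize_bbox in data/utils.py rescales pixel boxes into the 0–1000 coordinate grid that LayoutLM-style models expect. The preview cuts off at the fourth coordinate; by symmetry with the second it should divide by the page height, so a self-contained sketch with that one line inferred:

def normalize_bbox(bbox, size):
    # bbox = (x0, y0, x1, y1) in pixels; size = (page_width, page_height).
    # x-coordinates are scaled by width, y-coordinates by height, onto 0-1000.
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),  # inferred: truncated in the preview
    ]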
from dataclasses import dataclass, field from typing import Optional @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, p...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/data_args.py
from dataclasses import dataclass from typing import Optional, Union import torch from detectron2.structures import ImageList from transformers import PreTrainedTokenizerBase from transformers.file_utils import PaddingStrategy @dataclass class DataCollatorForKeyValueExtraction: """ Data collator that will d...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/data_collator.py
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/datasets/__init__.py
# Lint as: python3 import json import logging import os import datasets from layoutlmft.data.utils import load_image, merge_bbox, normalize_bbox, simplify_bbox from transformers import AutoTokenizer _URL = "https://github.com/doc-analysis/XFUN/releases/download/v1.0/" _LANG = ["zh", "de", "es", "fr", "en", "it", "...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/datasets/xfun.py
# coding=utf-8 import json import os import datasets from layoutlmft.data.utils import load_image, normalize_bbox logger = datasets.logging.get_logger(__name__) _CITATION = """\ @article{Jaume2019FUNSDAD, title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents}, author={Guillaume Jaume and ...
EXA-1-master
exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/datasets/funsd.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import logging import math import os import pickle import random from time import sleep import numpy as np import torch from nltk.translate.bleu_score import sent...
EXA-1-master
exa/models/unilm-master/layoutreader/decode_seq2seq.py
from io import open from setuptools import find_packages, setup extras = { 'serving': ['pydantic', 'uvicorn', 'fastapi'], 'serving-tf': ['pydantic', 'uvicorn', 'fastapi'], 'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch'] } extras['all'] = [package for package in extras.values()] setup( na...
EXA-1-master
exa/models/unilm-master/layoutreader/setup.py
from __future__ import absolute_import, division, print_function import argparse import json import logging import os import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensor...
EXA-1-master
exa/models/unilm-master/layoutreader/run_seq2seq.py
from __future__ import absolute_import, division, print_function, unicode_literals import logging from transformers import BertConfig, RobertaConfig from s2s_ft.configuration_unilm import UnilmConfig # from s2s_ft.modeling import LayoutlmConfig logger = logging.getLogger(__name__) class BertForSeq2SeqConfig(BertCon...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/config.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/configuration_minilm.py
# coding=utf-8 """PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import json import logging import math import os import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.nn.modules...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/modeling_decoding.py
import numpy as np from random import randint, shuffle, choice from random import random as rand import math import logging import torch import torch.utils.data logger = logging.getLogger(__name__) def get_random_word(vocab_words): i = randint(0, len(vocab_words)-1) return vocab_words[i] def batch_list_t...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/s2s_loader.py
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) def get_checkpoint_from_transformer_cache( archive_file, pretrained_model_name_or_path, pretrained_model_archive_map, cache_dir, force...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/convert_state_dict.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/tokenization_unilm.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/configuration_unilm.py
from __future__ import absolute_import, division, print_function import logging import os import json import random import glob import re import torch import tqdm import torch.utils.data logger = logging.getLogger(__name__) class Seq2seqDatasetForBert(torch.utils.data.Dataset): def __init__( self, ...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/utils.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/tokenization_minilm.py
from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import os import torch from torch import nn from torch.nn.modules.loss import _Loss import torch.nn.functional as F from transformers import BertConfig from transformers.modeling_bert import \ BertPreTra...
EXA-1-master
exa/models/unilm-master/layoutreader/s2s_ft/modeling.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import glob import logging import argparse import math from tqdm import tqdm import numpy as np import torch import random import pickle from s2s_ft.modelin...
EXA-1-master
exa/models/unilm-master/s2s-ft/decode_seq2seq.py
from io import open from setuptools import find_packages, setup extras = { 'serving': ['pydantic', 'uvicorn', 'fastapi'], 'serving-tf': ['pydantic', 'uvicorn', 'fastapi'], 'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch'] } extras['all'] = [package for package in extras.values()] setup( na...
EXA-1-master
exa/models/unilm-master/s2s-ft/setup.py
import pickle import math import argparse import glob import logging from pathlib import Path from tqdm import tqdm import unicodedata from transformers import BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenize...
EXA-1-master
exa/models/unilm-master/s2s-ft/gen_seq_from_trace.py
from __future__ import absolute_import, division, print_function import argparse import logging import os import json import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensor...
EXA-1-master
exa/models/unilm-master/s2s-ft/run_seq2seq.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib i...
EXA-1-master
exa/models/unilm-master/s2s-ft/evaluations/eval_for_xsum.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib i...
EXA-1-master
exa/models/unilm-master/s2s-ft/evaluations/eval_for_gigaword.py
from __future__ import print_function, unicode_literals, division import os import re import codecs import platform from subprocess import check_output from tempfile import mkdtemp from functools import partial try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigPars...
EXA-1-master
exa/models/unilm-master/s2s-ft/evaluations/bs_pyrouge.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib i...
EXA-1-master
exa/models/unilm-master/s2s-ft/evaluations/eval_for_cnndm.py
from __future__ import absolute_import, division, print_function, unicode_literals import logging from transformers import BertConfig, RobertaConfig from s2s_ft.configuration_unilm import UnilmConfig logger = logging.getLogger(__name__) class BertForSeq2SeqConfig(BertConfig): def __init__(self, label_smoothing=...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/config.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/configuration_minilm.py
# coding=utf-8 """PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from functools import partial import torc...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/modeling_decoding.py
import numpy as np from random import randint, shuffle, choice from random import random as rand import math import logging import torch import torch.utils.data logger = logging.getLogger(__name__) def get_random_word(vocab_words): i = randint(0, len(vocab_words)-1) return vocab_words[i] def batch_list_t...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/s2s_loader.py
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) def get_checkpoint_from_transformer_cache( archive_file, pretrained_model_name_or_path, pretrained_model_archive_map, cache_dir, force...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/convert_state_dict.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/tokenization_unilm.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/configuration_unilm.py
from __future__ import absolute_import, division, print_function import logging import os import json import random import glob import torch import tqdm import array import collections import torch.utils.data from transformers.file_utils import WEIGHTS_NAME try: import lmdb except: pass OPTIM_NAME = "optimize...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/utils.py
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # t...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/tokenization_minilm.py
from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import os import torch from torch import nn from torch.nn.modules.loss import _Loss import torch.nn.functional as F from transformers.modeling_bert import \ BertPreTrainedModel, BertSelfOutput, BertInter...
EXA-1-master
exa/models/unilm-master/s2s-ft/s2s_ft/modeling.py
import torch.nn as nn import torch from fairseq.modules.quant_noise import quant_noise from fairseq.modules import MultiheadAttention from fairseq.modules.transformer_layer import TransformerDecoderLayerBase from fairseq.models.transformer import TransformerDecoderBase, TransformerDecoder from fairseq.modules.checkpoi...
EXA-1-master
exa/models/unilm-master/trocr/unilm_models.py
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. import os import logging import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from timm.models.vision_transformer import VisionTransformer, _cfg from timm.models.vision_transformer import Attention, Block fr...
EXA-1-master
exa/models/unilm-master/trocr/deit.py
import os from fairseq import search from fairseq import scoring, utils, metrics from fairseq.data import Dictionary, encoders from fairseq.tasks import LegacyFairseqTask, register_task from fairseq.tasks.fairseq_task import FairseqTask try: from .data import SROIETextRecognitionDataset, Receipt53KDataset, Synthe...
EXA-1-master
exa/models/unilm-master/trocr/task.py
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel from fairseq.models.transformer import base_architecture as base_transformer from fairseq.models.fairseq_encoder impor...
EXA-1-master
exa/models/unilm-master/trocr/trocr_models.py
from fairseq.scoring import BaseScorer, register_scorer from nltk.metrics.distance import edit_distance from fairseq.dataclass import FairseqDataclass import fastwer from Levenshtein import distance import string @register_scorer("cer", dataclass=FairseqDataclass) class CERScorer(BaseScorer): def __init__(self, cf...
EXA-1-master
exa/models/unilm-master/trocr/scoring.py
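trocr/scoring.py registers a fairseq CER scorer built on Levenshtein edit distance. A sketch of the metric itself, character error rate as edits divided by reference length, independent of the fairseq registry plumbing; the helper name and empty-reference handling are mine:

from Levenshtein import distance  # pip install python-Levenshtein

def cer(hypothesis: str, reference: str) -> float:
    """Character error rate: edit operations needed / reference length."""
    if not reference:
        return 0.0 if not hypothesis else 1.0  # assumed edge-case convention
    return distance(hypothesis, reference) / len(reference)

# e.g. cer("h3llo world", "hello world") == 1/11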
import os from data import SROIETask2 from tqdm import tqdm import shutil import zipfile if __name__ == '__main__': test_dir = '../SROIE_Task2_Original/test' output_dir = 'temp' os.makedirs(output_dir, exist_ok=True) generate_txt_path = '../generate-test.txt' output_file = None output_fp = None...
EXA-1-master
exa/models/unilm-master/trocr/convert_to_SROIE_format.py
from .task import TextRecognitionTask from .vit_models import ViTTRModel, ViT_TR_base from .scoring import AccEDScorer from .deit import * from .trocr_models import TrOCRModel from .bpe import GPT2BPEEnhancedSpace
EXA-1-master
exa/models/unilm-master/trocr/__init__.py
import torch import math from typing import Dict, List, Optional from fairseq.sequence_generator import SequenceGenerator from torch import Tensor class TextRecognitionGenerator(SequenceGenerator): def _generate( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor]...
EXA-1-master
exa/models/unilm-master/trocr/generator.py
import torch.nn as nn from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel from fairseq.models.fairseq_encoder import EncoderOut from fairseq import utils # from timm.mod...
EXA-1-master
exa/models/unilm-master/trocr/vit_models.py
import torchvision.transforms as transforms # from torchvision.transforms.functional import InterpolationMode from PIL import Image, ImageFilter import random import torch import numpy as np import logging from enum import Enum from .augmentation.warp import Curve, Distort, Stretch from .augmentation.geometry import Ro...
EXA-1-master
exa/models/unilm-master/trocr/data_aug.py
import task import deit import trocr_models import torch import fairseq from fairseq import utils from fairseq_cli import generate from PIL import Image import torchvision.transforms as transforms def init(model_path, beam=5): model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task( [mode...
EXA-1-master
exa/models/unilm-master/trocr/pic_inference.py
import glob import logging import os import random import torch from fairseq.data import FairseqDataset, data_utils from natsort import natsorted from PIL import Image from tqdm import tqdm logger = logging.getLogger(__name__) def default_collater(target_dict, samples, dataset=None): if not samples: ret...
EXA-1-master
exa/models/unilm-master/trocr/data.py
from tempfile import tempdir from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig from fairseq.data.encoders import register_bpe import logging logger = logging.getLogger(__name__) INSERT_OR_REPLACE = 0 # 1 for replace and 0 for insert @register_bpe("gpt2es", dataclass=GPT2BPEConfig) # as stands for att...
EXA-1-master
exa/models/unilm-master/trocr/bpe.py
import cv2 import numpy as np import math from PIL import Image, ImageOps, ImageDraw from skimage import color from scipy import interpolate from pkg_resources import resource_filename from io import BytesIO from .ops import plasma_fractal, clipped_zoom, MotionImage ''' PIL resize (W,H) ''' class Fog: def __in...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/weather.py
EXA-1-master
exa/models/unilm-master/trocr/augmentation/__init__.py
import cv2 import numpy as np from PIL import Image, ImageOps, ImageDraw ''' PIL resize (W,H) Torch resize is (H,W) ''' class VGrid: def __init__(self): pass def __call__(self, img, copy=True, max_width=4, mag=-1, prob=1.): if np.random.uniform(0,1) > prob: return img ...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/pattern.py
import os import cv2 from warp import Curve, Distort, Stretch from geometry import Rotate, Perspective, Shrink, TranslateX, TranslateY from pattern import VGrid, HGrid, Grid, RectGrid, EllipseGrid from noise import GaussianNoise, ShotNoise, ImpulseNoise, SpeckleNoise from blur import GaussianBlur, DefocusBlur, MotionB...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/test.py
import cv2 import numpy as np from wand.image import Image as WandImage from scipy.ndimage import zoom as scizoom from wand.api import library as wandlibrary class MotionImage(WandImage): def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0): wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, a...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/ops.py
import cv2 import numpy as np from PIL import Image, ImageOps import torchvision.transforms as transforms from wand.image import Image as WandImage from scipy.ndimage import zoom as scizoom from skimage.filters import gaussian from wand.api import library as wandlibrary from io import BytesIO #from skimage import col...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/blur.py
import cv2 import numpy as np import skimage as sk from PIL import Image, ImageOps from io import BytesIO from skimage import color ''' PIL resize (W,H) cv2 image is BGR PIL image is RGB ''' class Contrast: def __init__(self): pass def __call__(self, img, mag=-1, prob=1.): if np.r...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/camera.py
import numpy as np import skimage as sk from PIL import Image ''' PIL resize (W,H) ''' class GaussianNoise: def __init__(self): pass def __call__(self, img, mag=-1, prob=1.): if np.random.uniform(0,1) > prob: return img W, H = img.size #c = np.random.uniform(....
EXA-1-master
exa/models/unilm-master/trocr/augmentation/noise.py
import cv2 import numpy as np from PIL import Image, ImageOps ''' PIL resize (W,H) Torch resize is (H,W) ''' class Shrink: def __init__(self): self.tps = cv2.createThinPlateSplineShapeTransformer() self.translateXAbs = TranslateXAbs() self.translateYAbs = TranslateYAbs() def _...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/geometry.py
import cv2 import numpy as np from PIL import Image, ImageOps ''' PIL resize (W,H) Torch resize is (H,W) ''' class Stretch: def __init__(self): self.tps = cv2.createThinPlateSplineShapeTransformer() def __call__(self, img, mag=-1, prob=1.): if np.random.uniform(0,1) > prob: ...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/warp.py
from PIL import Image import PIL.ImageOps, PIL.ImageEnhance import numpy as np class Posterize: def __init__(self): pass def __call__(self, img, mag=-1, prob=1.): if np.random.uniform(0,1) > prob: return img c = [1, 3, 6] if mag<0 or mag>=len(c): index...
EXA-1-master
exa/models/unilm-master/trocr/augmentation/process.py
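The augmentation previews (noise.py, camera.py, process.py, and the rest) share one calling convention: __call__(img, mag=-1, prob=1.) applies the transform with probability prob and draws a severity from a small list c unless mag selects one explicitly. A sketch of that pattern using the Posterize values visible above (c = [1, 3, 6]); the RGB conversion is an assumption for PIL's posterize requirements:

import numpy as np
import PIL.ImageOps
from PIL import Image

class Posterize:
    def __call__(self, img: Image.Image, mag: int = -1, prob: float = 1.0) -> Image.Image:
        if np.random.uniform(0, 1) > prob:
            return img  # each augmentation fires independently with probability `prob`
        c = [1, 3, 6]  # bits kept per channel, from the visible snippet
        index = np.random.randint(0, len(c)) if mag < 0 or mag >= len(c) else mag
        return PIL.ImageOps.posterize(img.convert("RGB"), c[index])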
# -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # B...
EXA-1-master
exa/models/unilm-master/dit/classification/engine_for_finetuning.py
# -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # B...
EXA-1-master
exa/models/unilm-master/dit/classification/transforms.py
# -------------------------------------------------------- # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # By Hangbo Bao # B...
EXA-1-master
exa/models/unilm-master/dit/classification/datasets.py