Dataset schema:
  python_code  string, length 0 to 992k characters (previews below are truncated with "...")
  repo_name    string, length 8 to 46 characters
  file_path    string, length 5 to 162 characters
Each record below lists python_code (truncated preview), repo_name, and file_path, in that order.
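As a minimal sketch of how such records could be inspected, assuming the split has been exported locally as one JSON object per line with the three fields named above (the filename records.jsonl is a placeholder, not part of the dataset):

import json

def iter_records(path="records.jsonl"):
    # Placeholder path: assumes each line is a JSON object with the fields
    # python_code, repo_name, and file_path described in the schema above.
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    for rec in iter_records():
        # Print the repository, the path, and the size of the stored source file.
        print(f"{rec['repo_name']}\t{rec['file_path']}\t{len(rec['python_code'])} chars")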
import json import os from collections import defaultdict import numpy as np if __name__ == '__main__': base_dir = "vqa/reviews/coco2014_val80" review_files = [x for x in os.listdir(base_dir) if x.endswith('.jsonl') and x.startswith('gpt4_text')] for review_file in sorted(review_files): config =...
EXA-1-master
exa/models/LLaVA-main/llava/eval/summarize_gpt_review.py
import argparse import json import os import openai import tqdm import ray import time @ray.remote(num_cpus=4) def get_eval(content: str, max_tokens: int): while True: try: response = openai.ChatCompletion.create( model='gpt-4', messages=[{ '...
EXA-1-master
exa/models/LLaVA-main/llava/eval/eval_gpt_review.py
import argparse from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig import torch import os import json from tqdm import tqdm import shortuuid from llava.conversation import conv_templates from llava.utils import disable_torch_init from transformers import CLIPVisionModel, CLIPImageProcessor, Stopp...
EXA-1-master
exa/models/LLaVA-main/llava/eval/model_vqa_science.py
"""Generate answers with GPT-3.5""" # Note: you need to be using OpenAI Python v0.27.0 for the code below to work import argparse import json import os import time import concurrent.futures import openai import tqdm import shortuuid MODEL = 'gpt-3.5-turbo' MODEL_ID = 'gpt-3.5-turbo:20230327' def get_answer(question_...
EXA-1-master
exa/models/LLaVA-main/llava/eval/qa_baseline_gpt35.py
import argparse import json import os import re import random def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--base-dir', type=str) parser.add_argument('--result-file', type=str) parser.add_argument('--output-file', type=str) parser.add_argument('--output-result', type=str...
EXA-1-master
exa/models/LLaVA-main/llava/eval/eval_science_qa.py
"""Generate json file for webpage.""" import json import os import re # models = ['llama', 'alpaca', 'gpt35', 'bard'] models = ['vicuna'] def read_jsonl(path: str, key: str=None): data = [] with open(os.path.expanduser(path)) as f: for line in f: if not line: continue ...
EXA-1-master
exa/models/LLaVA-main/llava/eval/generate_webpage_data_from_table.py
import argparse import json import os import re import random from collections import defaultdict def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--base-dir', type=str) parser.add_argument('--gpt4-result', type=str) parser.add_argument('--our-result', type=str) parser.add_a...
EXA-1-master
exa/models/LLaVA-main/llava/eval/eval_science_qa_gpt4.py
import argparse from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig import torch import os import json from tqdm import tqdm import shortuuid from llava.conversation import conv_templates from llava.utils import disable_torch_init from transformers import CLIPVisionModel, CLIPImageProcessor, Stopp...
EXA-1-master
exa/models/LLaVA-main/llava/eval/model_vqa.py
import argparse from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria import torch import os import json from tqdm import tqdm import shortuuid from llava.conversation import default_conversation from llava.utils import disable_torch_init # new stopping implementation class KeywordsStoppingC...
EXA-1-master
exa/models/LLaVA-main/llava/eval/model_qa.py
import argparse import json import os import re import random from collections import defaultdict def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--base-dir', type=str) parser.add_argument('--gpt4-result', type=str) parser.add_argument('--requery-result', type=str) parser.a...
EXA-1-master
exa/models/LLaVA-main/llava/eval/eval_science_qa_gpt4_requery.py
import argparse import json import os import openai import tqdm import ray import time @ray.remote(num_cpus=4) def get_eval(content: str, max_tokens: int): while True: try: response = openai.ChatCompletion.create( model='gpt-4', messages=[{ '...
EXA-1-master
exa/models/LLaVA-main/llava/eval/eval_gpt_review_visual.py
import argparse from collections import defaultdict import datetime import json import os import time import gradio as gr import requests from llava.conversation import (default_conversation, conv_templates, SeparatorStyle) from llava.constants import LOGDIR from llava.utils import ...
EXA-1-master
exa/models/LLaVA-main/llava/serve/gradio_web_server.py
""" A model worker executes the model. """ import argparse import asyncio import dataclasses import logging import json import time from typing import List, Union import threading import uuid from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse import requests from tran...
EXA-1-master
exa/models/LLaVA-main/llava/serve/model_worker.py
""" A controller manages distributed workers. It sends worker addresses to clients. """ import argparse import asyncio import dataclasses from enum import Enum, auto import json import logging import time from typing import List, Union import threading from fastapi import FastAPI, Request from fastapi.responses import...
EXA-1-master
exa/models/LLaVA-main/llava/serve/controller.py
""" Manually register workers. Usage: python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002 """ import argparse import requests if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--controller-address", type=str) ...
EXA-1-master
exa/models/LLaVA-main/llava/serve/register_worker.py
""" Adopted from https://github.com/gradio-app/gradio/blob/main/gradio/components.py Fix a markdown render problem. """ from __future__ import annotations from gradio.components import * from markdown2 import Markdown class _Keywords(Enum): NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is p...
EXA-1-master
exa/models/LLaVA-main/llava/serve/gradio_patch.py
import argparse import json import requests from llava.conversation import default_conversation def main(): if args.worker_address: worker_addr = args.worker_address else: controller_addr = args.controller_address ret = requests.post(controller_addr + "/refresh_all_workers") ...
EXA-1-master
exa/models/LLaVA-main/llava/serve/test_message.py
EXA-1-master
exa/models/LLaVA-main/llava/serve/__init__.py
""" Usage: python3 -m fastchat.serve.cli --model ~/model_weights/llama-7b """ import argparse import time import torch from transformers import AutoTokenizer, AutoModelForCausalLM from llava.conversation import conv_templates, SeparatorStyle @torch.inference_mode() def generate_stream(tokenizer, model, params, devi...
EXA-1-master
exa/models/LLaVA-main/llava/serve/cli.py
code_highlight_css = ( """ #chatbot .hll { background-color: #ffffcc } #chatbot .c { color: #408080; font-style: italic } #chatbot .err { border: 1px solid #FF0000 } #chatbot .k { color: #008000; font-weight: bold } #chatbot .o { color: #666666 } #chatbot .ch { color: #408080; font-style: italic } #chatbot .cm { color:...
EXA-1-master
exa/models/LLaVA-main/llava/serve/gradio_css.py
""" Usage: python3 -m fastchat.data.optional_clean --lang en --reduce-rep --in sharegpt_clean.json --out output.json python3 -m fastchat.data.optional_clean --skip-lang en --reduce-rep --in sharegpt_clean.json --out output.json """ import argparse import json import re import polyglot from polyglot.detect import Detec...
EXA-1-master
exa/models/LLaVA-main/llava/data/optional_clean.py
""" Usage: python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json """ import argparse import json import logging import re from typing import Dict, Union import bs4 import markdownify # == 0.11.6 import tqdm def _get_html_tags(file_path: str): # Generate the list of html tags ...
EXA-1-master
exa/models/LLaVA-main/llava/data/clean_sharegpt.py
""" Split long conversations based on certain max length. Usage: python3 -m fastchat.data.split_long_conversation \ --in sharegpt_clean.json \ --out sharegpt_split.json \ --model-name-or-path $<model-name> """ import argparse import json from typing import Dict, Sequence, Optional import transformers impo...
EXA-1-master
exa/models/LLaVA-main/llava/data/split_long_conversation.py
EXA-1-master
exa/models/LLaVA-main/llava/data/__init__.py
""" Usage: python3 -m fastchat.data.inspect --in sharegpt_20230322_clean_lang_split.json """ import argparse import json import tqdm if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--in-file", type=str, required=True) parser.add_argument("--begin", type=int) args = ...
EXA-1-master
exa/models/LLaVA-main/llava/data/inspect.py
""" Usage: python3 pretty_json.py --in in.json --out out.json """ import argparse import json if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--in-file", type=str, required=True) parser.add_argument("--out-file", type=str, required=True) args = parser.parse_args() ...
EXA-1-master
exa/models/LLaVA-main/llava/data/pretty_json.py
import argparse import json import pathlib # Prompt from stanford alpaca's training script PROMPT_DICT = { "prompt_input": ( "Below is an instruction that describes a task, paired with an input that provides further context. " "Write a response that appropriately completes the request.\n\n" ...
EXA-1-master
exa/models/LLaVA-main/llava/data/alpaca-converter.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim import AdamW from kosmos import...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/train_kosmos_optimized.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from kosmos import Kosmos, KosmosTokenizer from accelerate impo...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/train_kosmos_stable.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim import AdamW from .kosmos impor...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/train_kosmos_original.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim import AdamW from .kosmos impor...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/training_kosmos_apex.py
#quantization + parallelism import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim impor...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/training_kosmos_3.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim import AdamW from .kosmos impor...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/train_kosmos_text.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim import AdamW from .kosmos impor...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/train_kosmos_code.py
import time import torch from accelerate.utils import set_seed from datasets import load_dataset from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup from torch.optim import AdamW from .kosmos impor...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/train_kosmos.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/training/notebookExperiments/main.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/kosmosSP.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/kosmos.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/video/kosmos_video.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/video/kosmos_conditional.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/audio/kosmos_audio.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/audio/kosmos_audio_data2vec.py
import torch from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import PositionalEmbedding from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast from tokenizers import SentencePieceBPETokenize...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/audio/kosmos_conditional.py
""" GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for soft...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/allModalities/kosmos3.py
import os import requests import torch from torch.nn import Module from torchvision import transforms from torchvision.models.video import r3d_18 from transformers import ( AutoModel, AutoTokenizer, CLIPModel, CLIPProcessor, Wav2Vec2ForCTC, T5Tokenizer, Wav2Vec2Processor, ) from torchscale.a...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/allModalities/kosmos2.py
import os import torch from torch.nn import Module from torchvision import transforms from torchvision.models.video import r3d_18 from transformers import ( AutoModel, AutoTokenizer, CLIPModel, CLIPProcessor, Data2VecForCTC, T5Tokenizer, Wav2Vec2Processor, list_models ) # Add additional...
EXA-1-master
exa/models/KOSMOS_reimplementation-main/model/allModalities/kosmos.py
from setuptools import find_packages, setup setup( name='gato-tf', version='0.0.2', description='Unofficial Gato: A Generalist Agent', url='https://github.com/OrigamiDream/gato.git', author='OrigamiDream', author_email='sdy36071@naver.com', license='MIT', packages=find_packages(), i...
EXA-1-master
exa/models/gato/setup.py
import tensorflow as tf from tensorflow.keras.optimizers import schedules, AdamW from gato import GatoConfig from gato.models import Gato # Load and preprocess your dataset def load_and_preprocess_dataset(): # Load and preprocess your dataset here # Return the dataset as a tf.data.Dataset object pass # In...
EXA-1-master
exa/models/gato/train.py
import copy from typing import Dict, Any class GatoConfig: @staticmethod def large(): return GatoConfig(num_transformer_blocks=24, num_attention_heads=16, layer_width=2048, feedforward_hidden_size=8192, ...
EXA-1-master
exa/models/gato/gato/config.py
from gato.config import GatoConfig from gato.models import Gato
EXA-1-master
exa/models/gato/gato/__init__.py
import tensorflow as tf from tensorflow.keras import layers, models from gato import GatoConfig from typing import Dict, Any, Union def _randomized_positions(from_v, to_v): pos = tf.random.uniform(from_v.shape, minval=0, maxval=1, dtype=tf.float32) pos = pos * tf.cast(to_v - from_v, dtype=tf.float32) pos...
EXA-1-master
exa/models/gato/gato/models/embedding.py
import tensorflow as tf from gato.models.transformer import TransformerBlock from gato.models.embedding import PatchPositionEncoding, ResidualEmbedding, LocalPositionEncoding, DiscreteEmbedding from gato.models.tokenizers import ContinuousValueTokenizer from tensorflow.keras import models from gato import GatoConfig ...
EXA-1-master
exa/models/gato/gato/models/__init__.py
import tensorflow as tf from tensorflow.keras import layers, models, activations from gato import GatoConfig from typing import Dict, Any, Union class TransformerBlock(layers.Layer): def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable: bool = True, ...
EXA-1-master
exa/models/gato/gato/models/transformer.py
import tensorflow as tf from gato import GatoConfig from tensorflow.keras import models from typing import Union, Dict, Any def mu_law_encode(x, mu=100, m=256): # Appendix B. Agent Data Tokenization Details sign = tf.math.sign(x) numerator = tf.math.log(tf.abs(x) * mu + 1.0) denominator = tf.math.log...
EXA-1-master
exa/models/gato/gato/models/tokenizers.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from setuptools import find_packages, setup setup( name="segment_anything", version="1.0", install_requires=[...
EXA-1-master
exa/models/segment-anything-main/setup.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from segment_anything.modeling import Sam from typing import Optional, Tuple from .util...
EXA-1-master
exa/models/segment-anything-main/segment_anything/predictor.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch from functools import partial from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWa...
EXA-1-master
exa/models/segment-anything-main/segment_anything/build_sam.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing impor...
EXA-1-master
exa/models/segment-anything-main/segment_anything/automatic_mask_generator.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from .build_sam import ( build_sam, build_sam_vit_h, build_sam_vit_l, build_sam_vit_b, sam_model_regis...
EXA-1-master
exa/models/segment-anything-main/segment_anything/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import math from copy import deepcopy from itertools import product from typing import An...
EXA-1-master
exa/models/segment-anything-main/segment_anything/utils/amg.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from torch.nn import functional as F from torchvision.transforms.functional import resize,...
EXA-1-master
exa/models/segment-anything-main/segment_anything/utils/transforms.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from torch.nn import functional as F from typing import Tuple from ..modeling import ...
EXA-1-master
exa/models/segment-anything-main/segment_anything/utils/onnx.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree.
EXA-1-master
exa/models/segment-anything-main/segment_anything/utils/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from .sam import Sam from .image_encoder import ImageEncoderViT from .mask_decoder import MaskDecoder from .prompt_encoder...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from typing import Type class MLPBlock(nn.Module): def __init__( self, ...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/common.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch from torch import Tensor, nn import math from typing import Tuple, Type from .common import MLPBlock clas...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/transformer.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Type from .common...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/image_encoder.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from torch import nn from typing import Any, Optional, Tuple, Type from .common import L...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/prompt_encoder.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.nn import functional as F from typing import Any, Dict, List, Tuple from .i...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/sam.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.nn import functional as F from typing import List, Tuple, Type from .common...
EXA-1-master
exa/models/segment-anything-main/segment_anything/modeling/mask_decoder.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import cv2 # type: ignore from segment_anything import SamAutomaticMaskGenerator, sam_model_registry import argparse im...
EXA-1-master
exa/models/segment-anything-main/scripts/amg.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch from segment_anything import sam_model_registry from segment_anything.utils.onnx import SamOnnxModel import...
EXA-1-master
exa/models/segment-anything-main/scripts/export_onnx_model.py
''' Adapted from https://github.com/lupantech/ScienceQA ''' from dataclasses import dataclass from typing import List, Optional def get_question_text(problem): question = problem['question'] return question def get_context_text(problem, use_caption): txt_context = problem['hint'] img_context = probl...
EXA-1-master
exa/models/mm-cot-main/utils_prompt.py
''' Adapted from https://github.com/lupantech/ScienceQA ''' import re from rouge import Rouge from nltk.translate.bleu_score import sentence_bleu from sentence_transformers import util ######################## ## BLEU ######################## def tokenize(text): tokens = re.split(r'\s|\.', text) tokens = [t f...
EXA-1-master
exa/models/mm-cot-main/evaluations.py
''' Adapted from https://github.com/huggingface/transformers ''' from transformers import T5Config, T5ForConditionalGeneration from transformers.models.t5.modeling_t5 import T5Stack, __HEAD_MASK_WARNING_MSG, T5EncoderModel import copy import math import os import warnings from typing import Optional, Tuple, Union impo...
EXA-1-master
exa/models/mm-cot-main/model.py
import os from torch.utils.data import Dataset import os import json import numpy as np import torch from utils_prompt import * img_shape = { "resnet": (512, 2048), "clip": (49, 2048), "detr": (100, 256), } def load_data_std(args): problems = json.load(open(os.path.join(args.data_root, 'scienceqa/prob...
EXA-1-master
exa/models/mm-cot-main/utils_data.py
''' Adapted from https://github.com/lupantech/ScienceQA ''' import os import json import argparse import warnings import pandas as pd from sentence_transformers import SentenceTransformer from evaluations import caculate_bleu, caculate_rouge, caculate_similariry warnings.filterwarnings('ignore') def get_acc_with_con...
EXA-1-master
exa/models/mm-cot-main/utils_evaluate.py
import os import numpy as np import torch import os import re import json import argparse import random from transformers import T5Tokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer, T5ForConditionalGeneration from model import T5ForConditionalGeneration, T5ForMultimodalGeneration from utils_da...
EXA-1-master
exa/models/mm-cot-main/main.py
import os import copy import pytorch_lightning as pl from vlmo.config import ex from vlmo.modules import VLMo from vlmo.datamodules.multitask_datamodule import MTDataModule from pytorch_lightning.plugins import environments as pl_env from pytorch_lightning.utilities.distributed import rank_zero_info class OMPIClust...
EXA-1-master
exa/models/unilm-master/vlmo/run.py
from setuptools import setup, find_packages setup( name="vlmo", packages=find_packages( exclude=[".dfc", ".vscode", "dataset", "notebooks", "result", "scripts"] ), version="1.0.0", license="MIT", description="VLMo: Unified Vision-Language Pre-Training with Mixture-of-Modality-Experts", ...
EXA-1-master
exa/models/unilm-master/vlmo/setup.py
from sacred import Experiment ex = Experiment("VLMo") def _loss_names(d): ret = { "itm": 0, # image-text matching loss "itc": 0, # image-text contrastive loss "mlm": 0, # masked language modeling loss "textmlm": 0, # text-only masked language modeling "vqa": 0, "nl...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/config.py
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/__init__.py
from .base_dataset import BaseDataset class F30KCaptionKarpathyDataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] if split == "train": names = ["f30k_caption_karpathy_train"] elif split == "val": names = ["...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/f30k_caption_karpathy_dataset.py
from .base_dataset import BaseDataset class VisualGenomeCaptionDataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] if split == "test": split = "val" if split == "train": names = ["vg"] elif split == ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/vg_caption_dataset.py
import random import torch import io import pyarrow as pa import os from PIL import Image from vlmo.transforms import keys_to_transforms class BaseDataset(torch.utils.data.Dataset): def __init__( self, data_dir: str, transform_keys: list, image_size: int, names: list, ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/base_dataset.py
from .base_dataset import BaseDataset class CocoCaptionKarpathyDataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] self.split = split if split == "train": names = ["coco_caption_karpathy_train", "coco_caption_karpathy_r...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/coco_caption_karpathy_dataset.py
from glob import glob from .base_dataset import BaseDataset class WikibkDataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] if split == "test": split = "val" if split == "train": names = [f"wikibk_train_{i}"...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/wikibk_dataset.py
from .vg_caption_dataset import VisualGenomeCaptionDataset from .coco_caption_karpathy_dataset import CocoCaptionKarpathyDataset from .f30k_caption_karpathy_dataset import F30KCaptionKarpathyDataset from .conceptual_caption_dataset import ConceptualCaptionDataset from .sbu_caption_dataset import SBUCaptionDataset from ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/__init__.py
from glob import glob from .base_dataset import BaseDataset class SBUCaptionDataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] if split == "test": split = "val" if split == "train": names = [f"sbu_{i}" for ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/sbu_caption_dataset.py
from .base_dataset import BaseDataset import sys import random class NLVR2Dataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] self.split = split if split == "train": names = ["nlvr2_train"] elif split == "val": ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/nlvr2_dataset.py
from .base_dataset import BaseDataset class VQAv2Dataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] self.split = split if split == "train": names = ["vqav2_train", "vqav2_trainable_val"] elif split == "val": ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/vqav2_dataset.py
from glob import glob from .base_dataset import BaseDataset class ConceptualCaptionDataset(BaseDataset): def __init__(self, *args, split="", **kwargs): assert split in ["train", "val", "test"] if split == "test": split = "val" if split == "train": names = [f"concep...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datasets/conceptual_caption_dataset.py
from vlmo.datasets import NLVR2Dataset from .datamodule_base import BaseDataModule class NLVR2DataModule(BaseDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def dataset_cls(self): return NLVR2Dataset @property def dataset_name(self): ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/nlvr2_datamodule.py
import functools from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader from torch.utils.data.dataset import ConcatDataset from torch.utils.data.distributed import DistributedSampler from . import _datamodules class MTDataModule(LightningDataModule): def __init__(self, _config...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/multitask_datamodule.py
import torch from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader from transformers import ( DataCollatorForLanguageModeling, DataCollatorForWholeWordMask, BertTokenizer, ) def get_pretrained_tokenizer(from_pretrained): if torch.distributed.is_initialized(): ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/datamodule_base.py
from vlmo.datasets import ConceptualCaptionDataset from .datamodule_base import BaseDataModule class ConceptualCaptionDataModule(BaseDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def dataset_cls(self): return ConceptualCaptionDataset @p...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/conceptual_caption_datamodule.py
from vlmo.datasets import SBUCaptionDataset from .datamodule_base import BaseDataModule class SBUCaptionDataModule(BaseDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def dataset_cls(self): return SBUCaptionDataset @property def datas...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/sbu_datamodule.py
from .vg_caption_datamodule import VisualGenomeCaptionDataModule from .f30k_caption_karpathy_datamodule import F30KCaptionKarpathyDataModule from .coco_caption_karpathy_datamodule import CocoCaptionKarpathyDataModule from .conceptual_caption_datamodule import ConceptualCaptionDataModule from .sbu_datamodule import SBUC...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/__init__.py
from vlmo.datasets import VisualGenomeCaptionDataset from .datamodule_base import BaseDataModule class VisualGenomeCaptionDataModule(BaseDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def dataset_cls(self): return VisualGenomeCaptionDataset ...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/vg_caption_datamodule.py
from vlmo.datasets import VQAv2Dataset from .datamodule_base import BaseDataModule from collections import defaultdict class VQAv2DataModule(BaseDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @property def dataset_cls(self): return VQAv2Dataset @p...
EXA-1-master
exa/models/unilm-master/vlmo/vlmo/datamodules/vqav2_datamodule.py
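Every file_path in this sample sits under exa/models/<project>/, so the rows can be bucketed by their third path component to see which projects contribute the most files. A minimal, self-contained sketch (the two literal rows are copied from the listing above; python_code is omitted for brevity):

from collections import Counter

# Two example records taken from the listing above (python_code omitted).
SAMPLE_ROWS = [
    {"repo_name": "EXA-1-master",
     "file_path": "exa/models/LLaVA-main/llava/eval/model_vqa.py"},
    {"repo_name": "EXA-1-master",
     "file_path": "exa/models/segment-anything-main/segment_anything/predictor.py"},
]

def count_by_project(rows):
    # Paths follow the pattern exa/models/<project>/..., so the project
    # name is the third path component when it exists.
    counts = Counter()
    for row in rows:
        parts = row["file_path"].split("/")
        counts[parts[2] if len(parts) > 2 else row["file_path"]] += 1
    return counts

if __name__ == "__main__":
    for project, n in count_by_project(SAMPLE_ROWS).most_common():
        print(project, n)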