| code (string, 141 to 97.3k chars) | apis (list, 1 to 24 items) | extract_api (string, 113 to 214k chars) |
|---|---|---|
import langchain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.cache import InMemoryCache
from langchain import PromptTemplate
import os
import openai
from langchain.prompts import (
ChatPromptTemplat... | [
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.llms.OpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.AIMessagePromptTemplate.from_template",
"langchain.prompts.HumanMessagePromptTemplate.from_template"
] | [((734, 742), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (740, 742), False, 'from langchain.llms import OpenAI\n'), ((750, 784), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key'}), '(openai_api_key=api_key)\n', (760, 784), False, 'from langchain.chat_models import ChatOpenAI... |
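The row above combines ChatOpenAI with per-role message prompt templates. A minimal sketch of how those listed APIs fit together, using the legacy pre-0.1 langchain import paths shown in the row (the template strings are illustrative, not from the original code):

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

# Build a chat prompt from per-role sub-templates (template text is illustrative).
system = SystemMessagePromptTemplate.from_template(
    "You are a translator from {input_language} to {output_language}."
)
human = HumanMessagePromptTemplate.from_template("{text}")
chat_prompt = ChatPromptTemplate.from_messages([system, human])

# Format the prompt and send it to the chat model.
chat = ChatOpenAI(temperature=0)  # reads OPENAI_API_KEY from the environment
messages = chat_prompt.format_messages(
    input_language="English", output_language="French", text="Good morning"
)
print(chat(messages).content)
```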
"""Load html from files, clean up, split, ingest into Weaviate."""
import logging
import os
import re
# from parser import langchain_docs_extractor
import weaviate
import faiss
from bs4 import BeautifulSoup, SoupStrainer
from langchain_community.document_loaders import RecursiveUrlLoader, SitemapLoader, DirectoryLoade... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.document_loaders.RecursiveUrlLoader",
"langchain_community.document_loaders.DirectoryLoader",
"langchain.vectorstores.weaviate.Weaviate",
"langchain.indexes.SQLRecordManager"
] | [((722, 761), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (741, 761), False, 'import logging\n'), ((771, 798), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (788, 798), False, 'import logging\n'), ((3746, 3773), 'bs4.BeautifulS... |
import langchain
from langchain.llms import GooglePalm
from langchain.document_loaders import CSVLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
import os
from dot... | [
"langchain.prompts.PromptTemplate",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.CSVLoader",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.llms.GooglePalm",
"langchain.embeddings.HuggingFaceInstructEmbeddings"
] | [((344, 357), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (355, 357), False, 'from dotenv import load_dotenv\n'), ((917, 989), 'langchain.llms.GooglePalm', 'GooglePalm', ([], {'google_api_key': "os.environ['GOOGLE_API_KEY']", 'temperature': '(0.7)'}), "(google_api_key=os.environ['GOOGLE_API_KEY'], temperatur... |
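This row truncates a CSV-backed retrieval-QA app. A minimal sketch of the pipeline its API list implies, assuming a hypothetical faqs.csv (with a hypothetical question column) and a GOOGLE_API_KEY environment variable:

```python
import os
from langchain.llms import GooglePalm
from langchain.document_loaders import CSVLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA

# Load rows from a CSV file (path and source column are hypothetical).
docs = CSVLoader(file_path="faqs.csv", source_column="question").load()

# Embed the documents locally and index them in FAISS.
vectordb = FAISS.from_documents(docs, HuggingFaceInstructEmbeddings())

# Answer questions over the index with a PaLM LLM.
llm = GooglePalm(google_api_key=os.environ["GOOGLE_API_KEY"], temperature=0.7)
chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectordb.as_retriever())
print(chain.run("How do I reset my password?"))
```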
import streamlit as st
import langchain as lc
from typing import Callable
from utils import *
#####################################################
# This file contains everything reusable in the app #
#####################################################
def show_past_conversations():
conversations = get_conver... | [
"langchain.callbacks.get_openai_callback"
] | [((942, 1102), 'streamlit.number_input', 'st.number_input', (['"""Monthly limit ($)"""'], {'value': '(15.0)', 'min_value': '(1.0)', 'max_value': '(120.0)', 'step': '(1.0)', 'format': '"""%.2f"""', 'help': '"""The monthly limit for the OpenAI API"""'}), "('Monthly limit ($)', value=15.0, min_value=1.0, max_value=\n 1... |
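The only langchain API extracted from this row is get_openai_callback, which meters token usage and estimated cost. A minimal sketch of how it is typically used (the LLM setup is an assumption, not part of the row):

```python
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback

llm = OpenAI(temperature=0)

# Every LLM call made inside the context manager is metered.
with get_openai_callback() as cb:
    llm("Tell me a joke")
    print(cb.total_tokens)  # prompt + completion tokens
    print(cb.total_cost)    # estimated USD cost
```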
import langchain
from langchain.chains.llm import LLMChain
from langchain_openai import AzureChatOpenAI
from langchain.memory import ReadOnlySharedMemory, ConversationBufferMemory
from langchain.agents import BaseSingleActionAgent, Tool, AgentType, initialize_agent, AgentExecutor
from langchain.chat_models.base import... | [
"langchain.chains.llm.LLMChain",
"langchain.prompts.chat.MessagesPlaceholder",
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.agents.initialize_agent",
"langchain.schema.OutputParserException",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.prompts.Prom... | [((4432, 4548), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.chat_model', 'prompt': 'router_prompt_template', 'memory': 'self.readonly_memory', 'verbose': 'self.verbose'}), '(llm=self.chat_model, prompt=router_prompt_template, memory=self.\n readonly_memory, verbose=self.verbose)\n', (4440, 4548),... |
import os
import openai
import pinecone
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.llms import OpenAI
from langchain.chat_mod... | [
"langchain.vectorstores.Pinecone.from_documents",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.document_loaders.DirectoryLoader",
"langchain.memory.ConversationBufferMemory",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((579, 592), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (590, 592), False, 'from dotenv import load_dotenv\n'), ((609, 636), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (618, 636), False, 'import os\n'), ((656, 685), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'... |
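This row sketches a Pinecone ingestion pipeline. A minimal reconstruction under stated assumptions: the docs/ directory and index name are hypothetical, and pinecone.init assumes the legacy pinecone-client that these imports were written against:

```python
import os
import pinecone
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

# Load and chunk the source documents (directory is hypothetical).
docs = DirectoryLoader("docs/").load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)

# Connect to Pinecone (legacy client) and upsert the embedded chunks.
pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment=os.environ["PINECONE_ENV"])
index = Pinecone.from_documents(chunks, OpenAIEmbeddings(), index_name="my-index")
```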
# Import langchain and azure cognitive search
import langchain
from typing import Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
from langchain.tools.base import BaseTool
from azure.core.credentials import AzureKeyCredential
from azure.search.d... | [
"langchain.utils.get_from_dict_or_env"
] | [((1527, 1551), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1541, 1551), False, 'from pydantic import BaseModel, Extra, root_validator\n'), ((1721, 1813), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""azure_cognitive_search_key"""', '"""AZURE_... |
from langchain.chat_models import ChatOpenAI
from langchain.agents import tool, load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
import langchain
langchain.debug = True
# llm
llm = ChatOpenAI(temperature=0)
# tools
@tool
def get_word_length(word: str) -> int:
"""Re... | [
"langchain.agents.load_tools",
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI"
] | [((231, 256), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (241, 256), False, 'from langchain.chat_models import ChatOpenAI\n'), ((381, 414), 'langchain.agents.load_tools', 'load_tools', (["['llm-math']"], {'llm': 'llm'}), "(['llm-math'], llm=llm)\n", (391, 414), ... |
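The row's code cuts off inside a custom @tool definition (the docstring is truncated at """Re...). A sketch of how the listed agent APIs plug together; the docstring body and the final question are guesses for illustration:

```python
from langchain.chat_models import ChatOpenAI
from langchain.agents import tool, load_tools, initialize_agent, AgentType

llm = ChatOpenAI(temperature=0)

@tool
def get_word_length(word: str) -> int:
    """Returns the length of a word."""  # the agent reads this docstring as the tool description
    return len(word)

# Combine the custom tool with the built-in math tool and run a ReAct agent.
tools = load_tools(["llm-math"], llm=llm) + [get_word_length]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run("How many letters are in 'antidisestablishmentarianism'?")
```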
from langchain.agents import (
initialize_agent,
Tool,
AgentType
)
from llama_index.callbacks import (
CallbackManager,
LlamaDebugHandler
)
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
SummaryIndex,
SimpleDirectoryReader,
ServiceConte... | [
"langchain.chat_models.ChatOpenAI"
] | [((398, 456), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (417, 456), False, 'import logging\n'), ((529, 545), 'os.getenv', 'os.getenv', (['"""LLM"""'], {}), "('LLM')\n", (538, 545), False, 'import os\n'), ((1217, 12... |
'''
@Author: WANG Maonan
@Date: 2023-09-04 20:46:09
@Description: Traffic Light Control based on LLM-ReAct
1. There is a database; we search for the most similar scenario (how to define the similarity of scenarios), which can then be stored in memory or placed in the query
2. Checks on the different actions
- getAvailableActions, get all currently available actions
- get queue length of all phases
- get emergency vehicle
- check possible queu... | [
"langchain.chat_models.ChatOpenAI"
] | [((1268, 1290), 'tshub.utils.get_abs_path.get_abs_path', 'get_abs_path', (['__file__'], {}), '(__file__)\n', (1280, 1290), False, 'from tshub.utils.get_abs_path import get_abs_path\n'), ((1379, 1392), 'utils.readConfig.read_config', 'read_config', ([], {}), '()\n', (1390, 1392), False, 'from utils.readConfig import rea... |
import langchain
import requests
from pydantic import ValidationError
from langchain_core.prompts import ChatPromptTemplate
#from langchain import chains
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
#from rmrkl import ChatZeroShotAgent, RetryAgentExecutor
from langchain.agents ... | [
"langchain.agents.format_scratchpad.openai_tools.format_to_openai_tool_messages",
"langchain.agents.AgentExecutor",
"langchain.agents.output_parsers.openai_tools.OpenAIToolsAgentOutputParser",
"langchain_openai.ChatOpenAI",
"langchain.prompts.MessagesPlaceholder",
"langchain.callbacks.streaming_stdout.Str... | [((1066, 1220), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temp', 'model_name': 'model', 'request_timeout': '(1000)', 'streaming': '(False)', 'callbacks': 'callbacks', 'openai_api_key': 'api_key', 'verbose': '(False)'}), '(temperature=temp, model_name=model, request_timeout=1000,\n streaming=... |
"""Beta Feature: base interface for cache."""
import hashlib
import json
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm impo... | [
"langchain.schema.Generation",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis"
] | [((2037, 2055), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (2053, 2055), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2212, 2244), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (2218, 2244), Fal... |
# Needs to be in same directory as configs, data folder
# Imports
from _OpalLLM import OpalLLM
from _OpalLLM import OpalLLM
import sys
sys.path.append('/home/jovyan/.local/lib/python3.8/site-packages')
import torch
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langcha... | [
"langchain.chains.ConversationChain",
"langchain.LLMChain",
"langchain.llms.HuggingFacePipeline",
"langchain.PromptTemplate"
] | [((138, 204), 'sys.path.append', 'sys.path.append', (['"""/home/jovyan/.local/lib/python3.8/site-packages"""'], {}), "('/home/jovyan/.local/lib/python3.8/site-packages')\n", (153, 204), False, 'import sys\n'), ((3396, 3513), '_OpalLLM.OpalLLM', 'OpalLLM', ([], {'model': '"""lmsys/vicuna-33b"""', 'temperature': '(0.1)',... |
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import itertools
import logging
from datetime import datetime
from typing import (
Any,
Callable,
Coroutine,
Dict,
Iterator,
List,
Optional,
Sequence,... | [
"langchain.smith.evaluation.string_run_evaluator.StringRunEvaluatorChain.from_run_and_data_type",
"langchain.evaluation.loading.load_evaluator",
"langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandler",
"langchain.chat_models.openai.ChatOpenAI",
"langchain.schema.messages.messages_from_dict",
"la... | [((1366, 1393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1383, 1393), False, 'import logging\n'), ((1704, 1721), 'urllib.parse.urlparse', 'urlparse', (['api_url'], {}), '(api_url)\n', (1712, 1721), False, 'from urllib.parse import urlparse, urlunparse\n'), ((24715, 24735), 'asyncio... |
import logging
import os
import openai
from langchain.chat_models import AzureChatOpenAI
import vishwa
from vishwa.mlmonitor.langchain.decorators.map_xpuls_project import MapXpulsProject
from vishwa.mlmonitor.langchain.decorators.telemetry_override_labels import TelemetryOverrideLabels
from vishwa.mlmonitor.langchain... | [
"langchain.chat_models.AzureChatOpenAI"
] | [((498, 525), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'import logging\n'), ((544, 571), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (553, 571), False, 'import os\n'), ((616, 639), 'os.getenv', 'os.getenv', (['"""OPENAI_URL"""... |
import ast
import copy
import json
import logging
from typing import List, Tuple, Dict, Callable
import langchain
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate
from langchain.prompts.chat import BaseMessagePromptTemplate
from langchain.schema import LLMResult
fro... | [
"langchain.PromptTemplate.from_template",
"langchain.prompts.HumanMessagePromptTemplate",
"langchain.LLMChain",
"langchain.schema.LLMResult"
] | [((557, 584), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (574, 584), False, 'import logging\n'), ((2451, 2481), 'copy.deepcopy', 'copy.deepcopy', (['self.gen_kwargs'], {}), '(self.gen_kwargs)\n', (2464, 2481), False, 'import copy\n'), ((2738, 2774), 'langchain.LLMChain', 'LLMChain', (... |
import os
import openai
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
openai.api_key = os.environ['OPENAI_API_KEY']
llm = OpenAI(te... | [
"langchain.chains.LLMChain",
"langchain_helper.generate_restaurant_name_and_items",
"langchain.llms.OpenAI",
"langchain.prompts.PromptTemplate",
"langchain.chains.SequentialChain"
] | [((311, 334), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)'}), '(temperature=0.7)\n', (317, 334), False, 'from langchain.llms import OpenAI\n'), ((384, 421), 'streamlit.title', 'st.title', (['"""Restaurant Name Generator"""'], {}), "('Restaurant Name Generator')\n", (392, 421), True, 'import streamlit... |
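This row is a Streamlit restaurant-name generator built from LLMChain and SequentialChain. A minimal sketch of the two-step chain the API list implies, with illustrative prompt wording and without the Streamlit UI:

```python
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain

llm = OpenAI(temperature=0.7)

# Step 1: cuisine -> restaurant name.
name_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Suggest one name for a {cuisine} restaurant."),
    output_key="restaurant_name",
)

# Step 2: restaurant name -> menu items.
items_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Suggest menu items for {restaurant_name}."),
    output_key="menu_items",
)

# Each step's output_key feeds the next prompt's input variable.
chain = SequentialChain(
    chains=[name_chain, items_chain],
    input_variables=["cuisine"],
    output_variables=["restaurant_name", "menu_items"],
)
print(chain({"cuisine": "Italian"}))
```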
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
... | [
"langchain.schema.messages.get_buffer_string",
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.tracers.run_collector.RunCollectorCallbackHandler",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tr... | [((1521, 1548), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1538, 1548), False, 'import logging\n'), ((1617, 1660), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1627, 1660), False, 'from contextvars i... |
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.indexes.vectorstore import VectorstoreIndexCreator
fro... | [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.docstore.document.Document",
"langchain.OpenAI",
"langchain.chains.question_answering.load_qa_chain"
] | [((568, 616), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (577, 616), False, 'import os\n'), ((666, 684), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (682, 684), False, 'from langchain.embeddings.open... |
import langchain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.cache import InMemoryCache
from dotenv import... | [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.InMemoryCache"
] | [((468, 483), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (473, 483), False, 'from flask import Flask, request, jsonify\n'), ((484, 493), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (488, 493), False, 'from flask_cors import CORS\n'), ((516, 531), 'langchain.cache.InMemoryCache', 'InMemoryCache... |
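This row is a Flask RAG service that enables in-memory LLM caching. A minimal sketch of the caching plus Chroma retrieval pattern, assuming an illustrative in-line document rather than the row's real corpus:

```python
import langchain
from langchain.cache import InMemoryCache
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.schema import Document

# Cache identical LLM calls in memory for the lifetime of the process.
langchain.llm_cache = InMemoryCache()

# Split and index some documents (contents are illustrative).
docs = [Document(page_content="LangChain is a framework for LLM apps.")]
chunks = CharacterTextSplitter(chunk_size=500, chunk_overlap=0).split_documents(docs)
vectordb = Chroma.from_documents(chunks, OpenAIEmbeddings())

qa = RetrievalQA.from_chain_type(llm=ChatOpenAI(temperature=0), retriever=vectordb.as_retriever())
print(qa.run("What is LangChain?"))
```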
import os
import pandas as pd
import requests
import openai
import chromadb
import langchain
from langchain.chains import RetrievalQA, SimpleSequentialChain, LLMChain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.docstore.docum... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.docstore.document.Document",
"langchain.vectorstores.Chroma"
] | [((591, 604), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (602, 604), False, 'from dotenv import load_dotenv\n'), ((612, 639), 'os.environ.get', 'os.environ.get', (['"""peace_dir"""'], {}), "('peace_dir')\n", (626, 639), False, 'import os\n'), ((657, 689), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API... |
import os
from dotenv import load_dotenv
import openai
import langchain
import azure.cognitiveservices.speech as speechsdk
import elevenlabs
import json
import requests
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.agents import AgentExecut... | [
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.SQLDatabase.from_uri",
"langchain.agents.agent_toolkits.SQLDatabaseToolkit",
"langchain.agents.create_sql_agent",
"langchain.SerpAPIWrapper",
"langchain.OpenAI",
"langchain.agents.Tool"
] | [((968, 1038), 'azure.cognitiveservices.speech.SpeechConfig', 'speechsdk.SpeechConfig', ([], {'subscription': 'speech_key', 'region': 'service_region'}), '(subscription=speech_key, region=service_region)\n', (990, 1038), True, 'import azure.cognitiveservices.speech as speechsdk\n'), ((1059, 1114), 'azure.cognitiveservi... |
# main.py
#####################################################################
# Amazon Bedrock - boto3
#####################################################################
import boto3
# Setup bedrock
bedrock_runtime = boto3.client(
service_name="bedrock-runtime",
region_name="us-east-1",
)
#############... | [
"langchain.llms.Bedrock",
"langchain.embeddings.BedrockEmbeddings"
] | [((225, 294), 'boto3.client', 'boto3.client', ([], {'service_name': '"""bedrock-runtime"""', 'region_name': '"""us-east-1"""'}), "(service_name='bedrock-runtime', region_name='us-east-1')\n", (237, 294), False, 'import boto3\n'), ((760, 837), 'langchain.llms.Bedrock', 'Bedrock', ([], {'client': 'bedrock_runtime', 'mode... |
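This row wires langchain to Amazon Bedrock through a boto3 runtime client. A minimal sketch; the model ids are illustrative, and any Bedrock model enabled in the account would do:

```python
import boto3
from langchain.llms import Bedrock
from langchain.embeddings import BedrockEmbeddings

bedrock_runtime = boto3.client(service_name="bedrock-runtime", region_name="us-east-1")

# LLM and embeddings share the same Bedrock runtime client
# (model ids below are illustrative examples).
llm = Bedrock(client=bedrock_runtime, model_id="anthropic.claude-v2")
embeddings = BedrockEmbeddings(client=bedrock_runtime, model_id="amazon.titan-embed-text-v1")

print(llm("Name one AWS region."))
print(len(embeddings.embed_query("hello")))
```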
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.bas... | [
"langchain.llm_cache.lookup",
"langchain.schema.messages.HumanMessage",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatGeneration",
"langchain.load.dump.dumps",
"langchain.schema.RunInfo",
"langchain.llm_cache.update",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"lang... | [((915, 952), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (920, 952), False, 'from pydantic import Field, root_validator\n'), ((1026, 1059), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (103... |
import os
import streamlit as st
import pickle
import time
import langchain
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langch... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.embeddings.OpenAIEmbeddings"
] | [((468, 500), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (482, 500), False, 'import os\n'), ((508, 547), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(500)'}), '(temperature=0.7, max_tokens=500)\n', (514, 547), False, 'from langchain.ll... |
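This row is a Streamlit news-research tool over UnstructuredURLLoader and FAISS. A minimal sketch of the load-split-index-ask flow, with a placeholder URL and without the Streamlit and pickle plumbing:

```python
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQAWithSourcesChain

llm = OpenAI(temperature=0.7, max_tokens=500)

# Fetch and chunk the pages (the URL is a placeholder).
docs = UnstructuredURLLoader(urls=["https://example.com/article"]).load()
chunks = RecursiveCharacterTextSplitter(chunk_size=1000).split_documents(docs)

# Index the chunks and answer questions with source attribution.
vectordb = FAISS.from_documents(chunks, OpenAIEmbeddings())
chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectordb.as_retriever())
print(chain({"question": "What is the article about?"}, return_only_outputs=True))
```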
#!/usr/bin/env python
# coding: utf-8
# Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Found... | [
"langchain.cache.SQLiteCache",
"langchain.output_parsers.PydanticOutputParser"
] | [((1950, 1998), 'langchain.cache.SQLiteCache', 'SQLiteCache', (['""".execution_llm_spike.langchain.db"""'], {}), "('.execution_llm_spike.langchain.db')\n", (1961, 1998), False, 'from langchain.cache import SQLiteCache\n'), ((2081, 2113), 'blackboard_pagi.cached_chat_model.CachedChatOpenAI', 'CachedChatOpenAI', ([], {'m... |
import httpcore
setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import speech_recognition as sr
import langid
from pydub import AudioSegment
import langchain
import subprocess
from langchain.chat_models im... | [
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.PromptTemplate.from_template"
] | [((3911, 4381), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""You are a normal consulting nurse/doctor. You will recieve some keywords or sentences described by the patient as input. You have to ask the patient two follow up question so as to acquire the information important t... |
import streamlit as st
from streamlit_chat import message
import langchain_helper as lch
from langchain.schema import (SystemMessage, HumanMessage, AIMessage, messages)
def main():
st.set_page_config(
page_title="Iliad technical assessment",
page_icon="🤖",
)
st.header("ChatBot Free Assist... | [
"langchain.schema.AIMessage",
"langchain_helper.main",
"langchain.schema.HumanMessage"
] | [((187, 261), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Iliad technical assessment"""', 'page_icon': '"""🤖"""'}), "(page_title='Iliad technical assessment', page_icon='🤖')\n", (205, 261), True, 'import streamlit as st\n'), ((289, 325), 'streamlit.header', 'st.header', (['"""ChatBot Fr... |
from typing import ClassVar
from langchain.chains.base import Chain
from typing import Any, Type
import os
import langchain
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache()
class BaseChain(Chain):
template_file: ClassVar[str]
generator_template: ClassVar[str]
normalizer_templa... | [
"langchain.cache.SQLiteCache"
] | [((188, 201), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (199, 201), False, 'from langchain.cache import SQLiteCache\n')] |
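This row sets a process-wide SQLite-backed LLM cache. A minimal sketch of the same pattern, with an assumed database path:

```python
import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI

# Persist LLM responses to a local SQLite file; repeated identical
# prompts are served from the cache instead of hitting the API again.
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")

llm = OpenAI(temperature=0)
llm("Tell me a joke")  # first call: goes to the API
llm("Tell me a joke")  # second call: answered from the cache
```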
""" This example shows how to use the map-reduce chain to summarize a document. """
import os
import langchain
from langchain_openai import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import PyPDFLoader
from dotenv import load_dotenv
lo... | [
"langchain.chains.summarize.load_summarize_chain",
"langchain_community.document_loaders.PyPDFLoader",
"langchain_openai.ChatOpenAI"
] | [((318, 331), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (329, 331), False, 'from dotenv import load_dotenv\n'), ((352, 379), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (361, 379), False, 'import os\n'), ((415, 479), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'... |
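This row demonstrates map-reduce summarization. A minimal sketch, assuming a hypothetical report.pdf and an OPENAI_API_KEY in a local .env file:

```python
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import PyPDFLoader

load_dotenv()

# Split the PDF into per-page Documents (the path is a placeholder).
docs = PyPDFLoader("report.pdf").load_and_split()

# Map-reduce: summarize each chunk, then combine the partial summaries.
llm = ChatOpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"), temperature=0)
chain = load_summarize_chain(llm, chain_type="map_reduce")
print(chain.run(docs))
```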
"""LLM Chains for executing Retrival Augmented Generation."""
import base64
import os
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Generator, List, Optional
import torch
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFaceTextGenInf... | [
"langchain.llms.HuggingFaceTextGenInference",
"langchain.embeddings.HuggingFaceEmbeddings"
] | [((3156, 3202), 'os.environ.get', 'os.environ.get', (['"""APP_CONFIG_FILE"""', '"""/dev/null"""'], {}), "('APP_CONFIG_FILE', '/dev/null')\n", (3170, 3202), False, 'import os\n'), ((3216, 3262), 'chain_server.configuration.AppConfig.from_file', 'configuration.AppConfig.from_file', (['config_file'], {}), '(config_file)\n... |
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_setup import llm
def setup_memory():
documents = SimpleDirectoryReader("./Data").load_data()
embed_model = Lan... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((429, 507), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(256)', 'llm': 'llm', 'embed_model': 'embed_model'}), '(chunk_size=256, llm=llm, embed_model=embed_model)\n', (457, 507), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext... |
from modules.preprocessors import BasePreprocessor
from modules.templates import CONDENSE_QUESTION_TEMPLATE
from utils import create_collection, create_save_collection
import langchain
from typing import Optional, Any, Dict, Union
from langchain.schema import BaseDocumentTransformer
from langchain.schema.prompt_templa... | [
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.InMemoryCache",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.memory.ConversationBufferMemory",
"langchain.vectorstores.Chroma"
] | [((1674, 1689), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (1687, 1689), False, 'from langchain.cache import InMemoryCache\n'), ((3798, 3896), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""answer"""', 'return... |
import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vecto... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain.chat_models.ChatOpenAI",
"langchain.evaluation.qa.QAEvalChain.from_llm",
"langchain.document_loaders.CSVLoader"
] | [((409, 434), 'langchain.document_loaders.CSVLoader', 'CSVLoader', ([], {'file_path': 'file'}), '(file_path=file)\n', (418, 434), False, 'from langchain.document_loaders import CSVLoader\n'), ((565, 592), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.0)'}), '(temperature=0.0)\n', (575, 592)... |
import langchain_visualizer # isort:skip # noqa: F401
from fvalues import FValue
from langchain import FewShotPromptTemplate, PromptTemplate
def test_few_shot_f():
examples = [
{"word": "happy", "antonym": "sad"},
{"word": "tall", "antonym": "short"},
# Should be able to handle extra ke... | [
"langchain.FewShotPromptTemplate",
"langchain.PromptTemplate"
] | [((455, 544), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['word', 'antonym']", 'template': '"""w={word},a={antonym}"""'}), "(input_variables=['word', 'antonym'], template=\n 'w={word},a={antonym}')\n", (469, 544), False, 'from langchain import FewShotPromptTemplate, PromptTemplate\n'), (... |
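This test row exercises FewShotPromptTemplate. A minimal sketch of the API it tests, with illustrative example data and prompt text:

```python
from langchain import FewShotPromptTemplate, PromptTemplate

examples = [
    {"word": "happy", "antonym": "sad"},
    {"word": "tall", "antonym": "short"},
]

# How each example is rendered inside the prompt.
example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)

few_shot = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)
print(few_shot.format(input="big"))
```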
import langchain.utilities.opaqueprompts as op
from langchain import LLMChain, PromptTemplate
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.output_parser import StrOutputParser
from langchain.sch... | [
"langchain.utilities.opaqueprompts.desanitize",
"langchain.llms.OpenAI",
"langchain.memory.ConversationBufferWindowMemory",
"langchain.schema.output_parser.StrOutputParser",
"langchain.PromptTemplate.from_template"
] | [((2863, 2871), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2869, 2871), False, 'from langchain.llms import OpenAI\n'), ((2805, 2850), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2833, 2850), False, 'from langchain import LLMChai... |
from langchain.chat_models import ChatOpenAI
from dreamsboard.dreams.dreams_personality_chain.base import StoryBoardDreamsGenerationChain
import logging
import langchain
langchain.verbose = True
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Print to the console
handler = logging.StreamHandler()
handler.setLev... | [
"langchain.chat_models.ChatOpenAI"
] | [((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((282, 305), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (303, 305), False, 'import logging\n'), ((782, 806), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([],... |
"""Test Upstash Redis cache functionality."""
import uuid
import pytest
import langchain
from langchain.cache import UpstashRedisCache
from langchain.schema import Generation, LLMResult
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_... | [
"langchain.llm_cache.lookup",
"langchain.schema.Generation",
"langchain.llm_cache.redis.pttl",
"langchain.llm_cache.clear",
"langchain.llm_cache.redis.flushall",
"langchain.llm_cache._key"
] | [((436, 473), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (456, 473), False, 'import pytest\n'), ((809, 846), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (829, 846), False, 'import pytest\n'), ((2491, 252... |
from uuid import UUID
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser, initialize_agent
from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, PromptTemplate
from langchain import OpenAI, SerpAPIWrappe... | [
"langchain.prompts.HumanMessagePromptTemplate",
"langchain.chat_models.ChatOpenAI",
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.SystemMessagePromptTemplate",
"langchain.schema.AgentFinish",
"langchain.schema.AgentAction"... | [((987, 1047), 'langchain.prompts.SystemMessagePromptTemplate', 'SystemMessagePromptTemplate', ([], {'prompt': 'test_human_system_prompt'}), '(prompt=test_human_system_prompt)\n', (1014, 1047), False, 'from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate, ChatPromptTemplate, AIMessagePr... |
import time
import unittest.mock
from typing import Any
from uuid import UUID
from langchainplus_sdk import LangChainPlusClient
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.schemas import Run
from langchain.schema.output import LLMResult
def test_example_id_assi... | [
"langchain.schema.output.LLMResult",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchainplus_sdk.LangChainPlusClient"
] | [((741, 762), 'langchainplus_sdk.LangChainPlusClient', 'LangChainPlusClient', ([], {}), '()\n', (760, 762), False, 'from langchainplus_sdk import LangChainPlusClient\n'), ((780, 810), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (795, 810),... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
os.environ[... | [
"langchain.llms.Replicate"
] | [((488, 595), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (497, 595), False, 'from langchain.llms import R... |
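This row serves a Replicate-hosted Llama 2 model behind Flask. A minimal sketch of just the LLM setup; the model ref's version hash is elided and must be filled in from Replicate:

```python
import os
from langchain.llms import Replicate

os.environ["REPLICATE_API_TOKEN"] = "<your Replicate API token>"

# Replicate model refs take the form "owner/name:version";
# the identifier below is illustrative.
llama2_13b_chat = "meta/llama-2-13b-chat:<version-hash>"
llm = Replicate(
    model=llama2_13b_chat,
    model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens": 500},
)
print(llm("What is the capital of France?"))
```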
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** are usually used to transform many Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
# Import Langchain modules
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
# Impo... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.vectorstores.FAISS.from_documents",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.document_loaders.PyPDFLoader"
] | [((573, 606), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (596, 606), False, 'import warnings\n'), ((712, 808), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.IN... |
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUM... | [
"langchain.llms.Replicate"
] | [((1337, 1444), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1346, 1444), False, 'from langchain.llms impo... |
"""Utility functions for mlflow.langchain."""
import json
import logging
import os
import shutil
import types
from functools import lru_cache
from importlib.util import find_spec
from typing import NamedTuple
import cloudpickle
import yaml
from packaging import version
import mlflow
from mlflow.utils.class_utils impo... | [
"langchain.schema.output_parser.StrOutputParser",
"langchain.chains.loading.load_chain",
"langchain.agents.initialize_agent"
] | [((2001, 2028), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2018, 2028), False, 'import logging\n'), ((10189, 10235), 'os.path.join', 'os.path.join', (['path', '_MODEL_DATA_YAML_FILE_NAME'], {}), '(path, _MODEL_DATA_YAML_FILE_NAME)\n', (10201, 10235), False, 'import os\n'), ((5685, 57... |
"""Beta Feature: base interface for cache."""
import hashlib
import json
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm impo... | [
"langchain.schema.Generation",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis"
] | [((2037, 2055), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (2053, 2055), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2212, 2244), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (2218, 2244), Fal... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequenc... | [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.load.load.loads"
] | [((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((359... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequenc... | [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.load.load.loads"
] | [((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((359... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
... | [
"langchain.schema.messages.get_buffer_string",
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langch... | [((1329, 1356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1346, 1356), False, 'import logging\n'), ((1425, 1468), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1435, 1468), False, 'from contextvars i... |
"""Test Upstash Redis cache functionality."""
import uuid
import pytest
import langchain
from langchain.cache import UpstashRedisCache
from langchain.schema import Generation, LLMResult
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_... | [
"langchain.llm_cache.lookup",
"langchain.schema.Generation",
"langchain.llm_cache.redis.pttl",
"langchain.llm_cache.clear",
"langchain.llm_cache.redis.flushall",
"langchain.llm_cache._key"
] | [((436, 473), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (456, 473), False, 'import pytest\n'), ((809, 846), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (829, 846), False, 'import pytest\n'), ((2491, 252... |
"""Test Upstash Redis cache functionality."""
import uuid
import pytest
import langchain
from langchain.cache import UpstashRedisCache
from langchain.schema import Generation, LLMResult
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_... | [
"langchain.llm_cache.lookup",
"langchain.schema.Generation",
"langchain.llm_cache.redis.pttl",
"langchain.llm_cache.clear",
"langchain.llm_cache.redis.flushall",
"langchain.llm_cache._key"
] | [((436, 473), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (456, 473), False, 'import pytest\n'), ((809, 846), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (829, 846), False, 'import pytest\n'), ((2491, 252... |
'''
Create Vector Store from all documents in a folder, currently supports .pptx, .docx, .pdf files.
Created by Ric Zhou on 2021-03-27
'''
from langchain.document_loaders import (UnstructuredPowerPointLoader, UnstructuredWordDocumentLoader, PyPDFLoader, UnstructuredPDFLoader)
import glob
import langchain.text_splitte... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.UnstructuredWordDocumentLoader",
"langchain.document_loaders.UnstructuredPowerPointLoader",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.document_loaders.PyPDFLoader",
"langchain.vectorstores.FAISS.save_local"
] | [((604, 617), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (615, 617), False, 'from dotenv import load_dotenv\n'), ((618, 633), 'GlobalClasses.GlobalContext', 'GlobalContext', ([], {}), '()\n', (631, 633), False, 'from GlobalClasses import GlobalContext\n'), ((939, 1017), 'langchain.text_splitter.RecursiveCha... |
import os
import key
import tabulate
# Set API key
os.environ["OPENAI_API_KEY"] = key.OPENAI_API_KEY
# Import langchain
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langc... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain.chat_models.ChatOpenAI",
"langchain.document_loaders.CSVLoader"
] | [((465, 508), 'langchain.document_loaders.CSVLoader', 'CSVLoader', ([], {'file_path': 'file', 'encoding': '"""utf-8"""'}), "(file_path=file, encoding='utf-8')\n", (474, 508), False, 'from langchain.document_loaders import CSVLoader\n'), ((708, 735), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature':... |
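This row builds a CSV question-answering index via VectorstoreIndexCreator. A minimal sketch, assuming a hypothetical products.csv:

```python
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator

# Build an index straight from a CSV file (path is hypothetical);
# VectorstoreIndexCreator wraps loading, splitting, embedding and storage.
loader = CSVLoader(file_path="products.csv", encoding="utf-8")
index = VectorstoreIndexCreator().from_loaders([loader])

llm = ChatOpenAI(temperature=0)
print(index.query("Which products are in stock?", llm=llm))
```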
import openai
import langchain as lc
from langchain.llms import OpenAI
import gradio as gr
# Set the OpenAI API key
openai.api_key = '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'
# Initialize Langchain's OpenAI LLM
llm = OpenAI(api_key=openai.api_key)
# Define a function that processes the uploaded document and generates a response
def process_document(document):
    # Code for processing the document can be added here,... | [
"langchain.llms.OpenAI"
] | [((213, 243), 'langchain.llms.OpenAI', 'OpenAI', ([], {'api_key': 'openai.api_key'}), '(api_key=openai.api_key)\n', (219, 243), False, 'from langchain.llms import OpenAI\n'), ((508, 536), 'gradio.inputs.File', 'gr.inputs.File', ([], {'label': '"""上传文档"""'}), "(label='上传文档')\n", (522, 536), True, 'import gradio as gr\n'... |
import os
import pandas as pd
import math
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain import OpenAI, VectorDBQA
from langchain.llms import OpenAIChat
from langchain.document_loaders im... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.vectorstores.Chroma.from_documents",
"langchain.document_loaders.DataFrameLoader",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.OpenAI"
] | [((527, 555), 'sys.modules.pop', 'sys.modules.pop', (['"""pysqlite3"""'], {}), "('pysqlite3')\n", (542, 555), False, 'import sys\n'), ((558, 587), 'streamlit.title', 'st.title', (['"""GPT module (TEST)"""'], {}), "('GPT module (TEST)')\n", (566, 587), True, 'import streamlit as st\n'), ((606, 660), 'streamlit.text_inpu... |
# Python built-in modules
import os
import time
import json
# Python installed modules
import tiktoken
import langchain
from spacy.lang.en import English
class SentencizerSplitter(object):
def __init__(self, config_dict):
self.total_tokens = config_dict["embedding"]["total_tokens"]
self.approx_tota... | [
"langchain.schema.document.Document"
] | [((511, 520), 'spacy.lang.en.English', 'English', ([], {}), '()\n', (518, 520), False, 'from spacy.lang.en import English\n'), ((586, 653), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (["config_dict['embedding']['model_name']"], {}), "(config_dict['embedding']['model_name'])\n", (613, 653), False, 'im... |
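The class above pairs spaCy's rule-based sentencizer with a tiktoken budget so whole sentences get packed into Documents without exceeding a token limit. A minimal sketch of that packing loop; the model name and budget are assumptions:

import tiktoken
from spacy.lang.en import English
from langchain.schema.document import Document

nlp = English()
nlp.add_pipe("sentencizer")  # rule-based sentence boundaries, no model download
encoder = tiktoken.encoding_for_model("text-embedding-ada-002")  # assumed model name

def sentencize(text: str, budget: int = 512) -> list:
    """Pack whole sentences into Documents without exceeding `budget` tokens."""
    chunks, current, used = [], [], 0
    for sent in nlp(text).sents:
        n = len(encoder.encode(sent.text))
        if used + n > budget and current:
            chunks.append(Document(page_content=" ".join(current)))
            current, used = [], 0
        current.append(sent.text)
        used += n
    if current:
        chunks.append(Document(page_content=" ".join(current)))
    return chunks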
import os
import json
import openai
from utils import *
import random
import langchain
from langchain import PromptTemplate
from langchain.llms import OpenAI, OpenAIChat
from langchain import LLMChain
from re import compile
from datetime import datetime
from typing import NamedTuple
from openai import Embedding
#set ... | [
"langchain.LLMChain",
"langchain.llms.OpenAIChat",
"langchain.PromptTemplate"
] | [((1869, 1883), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1881, 1883), False, 'from datetime import datetime\n'), ((2826, 2890), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_text', 'input_variables': "['Memory']"}), "(template=prompt_text, input_variables=['Memory'])\n", (28... |
# Copyright (c) Khulnasoft Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llmk 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class ... | [
"langchain.llms.Replicate"
] | [((1513, 1619), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llmk2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llmk2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1522, 1619), False, 'from langchain.llms import... |
import os
import langchain
from config import *
from util import *
from langchain.llms import OpenAI, Cohere, HuggingFaceHub
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
from typing import Optional, Type
from langchain.callbacks.manager import AsyncCa... | [
"langchain.llms.OpenAI",
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.agents.Tool"
] | [((786, 807), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (792, 807), False, 'from langchain.llms import OpenAI, Cohere, HuggingFaceHub\n'), ((815, 840), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (825, 840), False, 'fro... |
import langchain
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI, OpenAI
from langchain.schema import HumanMessage, AIMessage, SystemMessage
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.cache... | [
"langchain.prompts.PromptTemplate",
"langchain_openai.ChatOpenAI",
"langchain.cache.InMemoryCache"
] | [((423, 436), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (434, 436), False, 'from dotenv import load_dotenv\n'), ((459, 474), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (472, 474), False, 'from langchain.cache import InMemoryCache\n'), ((508, 541), 'langchain_openai.ChatOpenAI', 'Ch... |
import json
from pathlib import Path
from typing import Dict, List
import langchain
import numpy as np
import typer
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI
from tqdm import tqdm
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
def _is_daster_empl(title: str) -> bool... | [
"langchain.llms.OpenAI",
"langchain.cache.SQLiteCache"
] | [((236, 278), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (247, 278), False, 'from langchain.cache import SQLiteCache\n'), ((905, 952), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-4-32k"""'}... |
import langchain.vectorstores.opensearch_vector_search as ovs
from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers
from langchain.vectorstores import OpenSearchVectorSearch
def create_ovs_client(
collection_id,
index_name,
region,
boto3_session,
bedrock_embeddings... | [
"langchain.vectorstores.OpenSearchVectorSearch"
] | [((470, 515), 'opensearchpy.AWSV4SignerAuth', 'AWSV4SignerAuth', (['credentials', 'region', 'service'], {}), '(credentials, region, service)\n', (485, 515), False, 'from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers\n'), ((543, 724), 'opensearchpy.OpenSearch', 'OpenSearch', ([], {'hos... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((1586, 1613), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1603, 1613), False, 'import logging\n'), ((5793, 5811), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (5809, 5811), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
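The module docstring above motivates the cache layer (fewer repeated API calls, faster responses). A minimal sketch of switching it on, assuming the classic module-level hook:

import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI

langchain.llm_cache = InMemoryCache()

llm = OpenAI(temperature=0)
llm("Tell me a joke")  # first call hits the provider and populates the cache
llm("Tell me a joke")  # identical prompt is now served from the cache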
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.... | [
"langchain.llm_cache.lookup",
"langchain.schema.messages.HumanMessage",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatGeneration",
"langchain.load.dump.dumps",
"langchain.schema.RunInfo",
"langchain.llm_cache.update",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"lang... | [((923, 960), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (928, 960), False, 'from pydantic import Field, root_validator\n'), ((1034, 1067), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (103... |
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
from ap... | [
"langchain.chains.LLMChain",
"langchain.prompts.ChatPromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.router.llm_router.RouterOutputParser",
"langchain.chains.router.MultiPromptChain",
"langchain.chains.router.llm_router.LLMRouterChain.from_llm"
] | [((3977, 4033), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'ChatGPTModel.GPT3.value'}), '(temperature=0, model=ChatGPTModel.GPT3.value)\n', (3987, 4033), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4531, 4574), 'langchain.prompts.ChatPromptTemplate.from_templa... |
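The imports above set up LangChain's prompt-routing pattern: an LLMRouterChain picks a destination chain by name and MultiPromptChain dispatches to it. A minimal sketch with two hypothetical destinations; the names, descriptions, and templates are placeholders:

from langchain.chains import LLMChain
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(temperature=0)
prompt_infos = [  # hypothetical destinations
    {"name": "physics", "description": "good for physics questions",
     "prompt_template": "You are a physicist. Answer:\n{input}"},
    {"name": "math", "description": "good for math questions",
     "prompt_template": "You are a mathematician. Answer:\n{input}"},
]
destination_chains = {
    info["name"]: LLMChain(llm=llm, prompt=PromptTemplate.from_template(info["prompt_template"]))
    for info in prompt_infos
}
destinations = "\n".join(f"{i['name']}: {i['description']}" for i in prompt_infos)
router_prompt = PromptTemplate(
    template=MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations),
    input_variables=["input"],
    output_parser=RouterOutputParser(),
)
chain = MultiPromptChain(
    router_chain=LLMRouterChain.from_llm(llm, router_prompt),
    destination_chains=destination_chains,
    default_chain=LLMChain(llm=llm, prompt=PromptTemplate.from_template("{input}")),
)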
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
Type... | [
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.schema.get_buffer_string",
"langchain.callbacks.tracers.langchain.LangC... | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars i... |
import argparse
import json
import logging
import os
import pathlib
from typing import Dict, List, Union, Optional
import langchain
import pandas as pd
import tiktoken
import wandb
from langchain import LLMChain, FAISS
from langchain.cache import SQLiteCache
from langchain.chains import HypotheticalDocumentEmbedder
fr... | [
"langchain.chat_models.ChatOpenAI",
"langchain.document_loaders.NotebookLoader",
"langchain.cache.SQLiteCache",
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.document_loaders.UnstructuredMarkdownLoader",
"langchain.embeddings.OpenAIEmbedding... | [((902, 943), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '"""langchain.db"""'}), "(database_path='langchain.db')\n", (913, 943), False, 'from langchain.cache import SQLiteCache\n'), ((954, 981), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (971, 981), False, 'i... |
import langchain
from dotenv import load_dotenv
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from rmrkl import ChatZeroShotAgent, RetryAgentExecutor
from .prompt import FORMAT_INSTRUCTIONS, QUESTION_PROMPT, SUFFIX
from .tools import make_tools, Doc, Text, search_texts, load_texts
imp... | [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((329, 342), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (340, 342), False, 'from dotenv import load_dotenv\n'), ((2064, 2077), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2074, 2077), False, 'import time\n'), ((1632, 1784), 'rmrkl.ChatZeroShotAgent.from_llm_and_tools', 'ChatZeroShotAgent.from_llm_... |
import os
import json
import time
from typing import List
import faiss
import pypdf
import random
import itertools
import text_utils
import pandas as pd
import altair as alt
import streamlit as st
from io import StringIO
from llama_index import Document
from langchain.llms import Anthropic
from langchain.chains import ... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.retrievers.SVMRetriever.from_texts",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.Anthropic",
"langchain.vectorstores.FAISS.from_texts",
"langchain.embeddings.HuggingFaceEmbedding... | [((13312, 13350), 'streamlit.sidebar.image', 'st.sidebar.image', (['"""img/diagnostic.jpg"""'], {}), "('img/diagnostic.jpg')\n", (13328, 13350), True, 'import streamlit as st\n'), ((15130, 15159), 'streamlit.header', 'st.header', (['"""`Auto-evaluator`"""'], {}), "('`Auto-evaluator`')\n", (15139, 15159), True, 'import ... |
# general imports
from constants import *
# streamlit imports
import streamlit as st
from utils import *
from streamlit_lottie import st_lottie
# llama index imports
import openai
from llama_index import (
VectorStoreIndex,
download_loader,
ServiceContext,
set_global_service_context,
)
from llama_inde... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((1017, 1080), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""', 'system_prompt': 'system_prompt'}), "(model='gpt-4-1106-preview', system_prompt=system_prompt)\n", (1023, 1080), False, 'from llama_index.llms import OpenAI\n'), ((1187, 1248), 'llama_index.ServiceContext.from_defaults', 'Se... |
#%%
import pandas as pd
from utils import get_random_string
from dotenv import load_dotenv
import os
import langchain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from openai import OpenAI
import json
import requests
import datetime
import... | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI"
] | [((347, 360), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (358, 360), False, 'from dotenv import load_dotenv\n'), ((368, 416), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (378, 416), False, 'from l... |
import os
#from dotenv import load_dotenv
import openai
import langchain
os.environ["OPENAI_API_KEY"] =""
os.environ["SQL_SERVER_USERNAME"] = ""
os.environ["SQL_SERVER_ENDPOINT"] = ""
os.environ["SQL_SERVER_PASSWORD"] = ""
os.environ["SQL_SERVER_DATABASE"] = ""
from sqlalchemy import create_engine
from sqlalchemy.... | [
"langchain.sql_database.SQLDatabase.from_uri",
"langchain.agents.create_sql_agent",
"langchain.agents.agent_toolkits.SQLDatabaseToolkit",
"langchain.llms.OpenAI"
] | [((785, 808), 'sqlalchemy.engine.url.URL.create', 'URL.create', ([], {}), '(**db_config)\n', (795, 808), False, 'from sqlalchemy.engine.url import URL\n'), ((814, 842), 'langchain.sql_database.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['db_url'], {}), '(db_url)\n', (834, 842), False, 'from langchain.sql_database ... |
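The row above builds a SQL agent over a SQL Server connection assembled from the environment variables it sets. A minimal sketch of the agent wiring; the question is a placeholder:

from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri(db_url)  # db_url built via URL.create(**db_config) as above
llm = OpenAI(temperature=0)
agent = create_sql_agent(llm=llm, toolkit=SQLDatabaseToolkit(db=db, llm=llm), verbose=True)
agent.run("How many rows are in the largest table?")  # placeholder question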
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
import threading
import weakref
from concurrent.futures import Future, ThreadPoolExecutor, wait
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
from uuid import UUID
import langsmith
f... | [
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled",
"langchain.callbacks.tracers.langchain._get_executor"
] | [((672, 699), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (689, 699), False, 'import logging\n'), ((755, 772), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (770, 772), False, 'import weakref\n'), ((3430, 3447), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (3445, 3... |
import os
import re
from uuid import UUID
from typing import Any, Dict, List, Optional, Union
import asyncio
import langchain
import streamlit as st
from langchain.schema import LLMResult
from langchain.chat_models import ChatOpenAI
from langchain.agents import Tool
from langchain.agents import AgentType
from langcha... | [
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.llms.OpenAI",
"langchain.memory.ConversationBufferMemory",
"langchain.agents.Tool"
] | [((815, 826), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (824, 826), False, 'import os\n'), ((6031, 6120), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'openai_api_key': 'openai_api_key'}), "(model_name='gpt-3.5-turbo', temperature=0, openai_api_key... |
import os
import weaviate
import key_config
import langchain
from langchain.vectorstores import Weaviate
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationSummaryMemory
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
clien... | [
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.chat_models.ChatOpenAI",
"langchain.vectorstores.Weaviate",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.memory.ConversationSummaryMemory"
] | [((438, 486), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (454, 486), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((496, 566), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""g... |
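The row above composes retrieval-augmented chat over a Weaviate index with summary memory. A minimal sketch of that composition; the endpoint, class name, and text key are placeholders:

import weaviate
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryMemory
from langchain.vectorstores import Weaviate

client = weaviate.Client("http://localhost:8080")  # placeholder endpoint
vectorstore = Weaviate(client, "Document", "content")  # hypothetical class and text key
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
memory = ConversationSummaryMemory(llm=llm, memory_key="chat_history", return_messages=True)
chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectorstore.as_retriever(), memory=memory)
chain({"question": "What does the indexed corpus say about pricing?"})  # placeholder query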
from approaches.index.store.cosmos_index_store import CosmosIndexStore
from llama_index import StorageContext
from approaches.index.store.cosmos_doc_store import CosmosDocumentStore
from llama_index import load_index_from_storage
import os
import openai
from langchain.chat_models import AzureChatOpenAI
from langchain.... | [
"langchain.embeddings.OpenAIEmbeddings"
] | [((832, 845), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (843, 845), False, 'from dotenv import load_dotenv\n'), ((1039, 1074), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_BASE"""'], {}), "('AZURE_OPENAI_BASE')\n", (1053, 1074), False, 'import os\n'), ((1098, 1153), 'os.environ.get', 'os.environ.... |
from abc import ABC, abstractmethod
from typing import List, Optional
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
from langchain.schema import (
AIMessage,
BaseLanguageMod... | [
"langchain.schema.AIMessage",
"langchain.schema.ChatGeneration",
"langchain.schema.HumanMessage",
"langchain.schema.ChatResult",
"langchain.schema.LLMResult",
"langchain.callbacks.get_callback_manager"
] | [((568, 605), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (573, 605), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((696, 739), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_ca... |
import logging
import os
import pprint
import uuid
from typing import List
import chromadb
import gradio as gr
import requests
import zhipuai
from bs4 import BeautifulSoup
from dotenv import load_dotenv, find_dotenv
# Import langchain stuff
from langchain.chains import ConversationalRetrievalChain
from langchain.docum... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain_community.vectorstores.chroma.Chroma.from_documents",
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain_core.prompts.PromptTemplate",
"langchain_community.embeddings.HuggingFaceEmbeddings",
"langchain.memory.ConversationBufferM... | [((1392, 1490), 'llms.zhipuai_llm.ZhipuAILLM', 'ZhipuAILLM', ([], {'model': '"""chatglm_turbo"""', 'temperature': '(0.9)', 'top_p': '(0.1)', 'zhipuai_api_key': 'zhipuai.api_key'}), "(model='chatglm_turbo', temperature=0.9, top_p=0.1,\n zhipuai_api_key=zhipuai.api_key)\n", (1402, 1490), False, 'from llms.zhipuai_llm ... |
"""An example of how to test Python code generating prompts"""
import re
# Bringing some "prompt generator" classes
from promptimize.prompt_cases import LangchainPromptCase
# Bringing some useful eval functions that help evaluate and score responses
# eval functions have a handle on the prompt object and are expect... | [
"langchain.output_parsers.ResponseSchema",
"langchain.output_parsers.StructuredOutputParser.from_response_schemas",
"langchain.PromptTemplate"
] | [((1146, 1208), 'langchain.output_parsers.StructuredOutputParser.from_response_schemas', 'StructuredOutputParser.from_response_schemas', (['response_schemas'], {}), '(response_schemas)\n', (1190, 1208), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((2218, 2382), 'langchain.Pr... |
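The promptimize row above scores prompts whose output must be machine-parseable via StructuredOutputParser. A minimal sketch of that parser setup; the schema names are hypothetical:

from langchain import PromptTemplate
from langchain.output_parsers import ResponseSchema, StructuredOutputParser

response_schemas = [  # hypothetical fields
    ResponseSchema(name="code", description="the generated Python code"),
    ResponseSchema(name="explanation", description="a one-line explanation"),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt = PromptTemplate(
    template="Write Python code that {task}.\n{format_instructions}",
    input_variables=["task"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
# parser.parse(llm_output) then yields a dict keyed by the schema names.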
"""
The ``mlflow.langchain`` module provides an API for logging and loading LangChain models.
This module exports multivariate LangChain models in the langchain flavor and univariate
LangChain models in the pyfunc flavor:
LangChain (native) format
This is the main flavor that can be accessed with LangChain APIs.
:... | [
"langchain.chains.loading.load_chain",
"langchain.agents.initialize_agent"
] | [((2012, 2046), 'logging.getLogger', 'logging.getLogger', (['mlflow.__name__'], {}), '(mlflow.__name__)\n', (2029, 2046), False, 'import logging\n'), ((11731, 11807), 'mlflow.utils.environment._validate_env_arguments', '_validate_env_arguments', (['conda_env', 'pip_requirements', 'extra_pip_requirements'], {}), '(conda... |
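The mlflow.langchain docstring above describes logging a chain in the native flavor and loading it back through pyfunc. A minimal sketch of that round trip; the prompt and input are placeholders:

import mlflow
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

chain = LLMChain(llm=OpenAI(temperature=0),
                 prompt=PromptTemplate.from_template("Translate to French: {text}"))

with mlflow.start_run():
    info = mlflow.langchain.log_model(chain, "langchain_model")

loaded = mlflow.pyfunc.load_model(info.model_uri)  # univariate pyfunc flavor
loaded.predict([{"text": "hello"}])  # input format assumed: list of dicts keyed by prompt variables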
# Import the necessary libraries
import random
import time
from llama_index.llms import OpenAI
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, StorageContext, set_global_service_context
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings",
"langchain_openai.ChatOpenAI"
] | [((855, 895), 'streamlit.title', 'st.title', (['"""🦜🔗 Tourism Assistant Chatbot"""'], {}), "('🦜🔗 Tourism Assistant Chatbot')\n", (863, 895), True, 'import streamlit as st\n'), ((5721, 5781), 'llama_index.set_global_service_context', 'set_global_service_context', (['st.session_state.service_context'], {}), '(st.sess... |
# This code sets up the necessary components and uses LangChain with the ChatOpenAI model to perform text summarization,
# and provides a user interface for input and output.
from langchain.document_loaders import UnstructuredFileLoader # Importing necessary modules
from langchain.document_loaders import ... | [
"langchain.chat_models.ChatOpenAI",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.UnstructuredFileLoader",
"langchain.prompts.PromptTemplate",
"langchain.document_loaders.UnstructuredPDFLoader",
"langchain.chains.summarize.load_summarize_chain"
] | [((5769, 5891), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Positive summarizer"""', 'page_icon': '"""📖"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""collapsed"""'}), "(page_title='Positive summarizer', page_icon='📖', layout=\n 'wide', initial_sidebar_state='collapsed')\n... |
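The comment above explains the summarization flow end to end. A minimal sketch of its core (load, split, map-reduce summarize); the file name and chunk sizes are assumptions:

from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

docs = UnstructuredFileLoader("report.txt").load()  # placeholder file
chunks = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200).split_documents(docs)
chain = load_summarize_chain(ChatOpenAI(temperature=0), chain_type="map_reduce")
summary = chain.run(chunks)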
import streamlit as st
from streamlit_chat import message
import pandas as pd
from langchain.llms import OpenAI
import os
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory
import plotly.express
from streamlit_searchbox import st_searchbox
from typing import List, ... | [
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1329, 1342), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1340, 1342), False, 'from dotenv import load_dotenv\n'), ((1378, 1486), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""PubMeta.ai"""', 'page_icon': '"""⚕️"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""auto"""... |
from typing import Any, Dict, List, Optional
from langchain import PromptTemplate ,LLMChain
import langchain
from langchain.chat_models import ChatOpenAI ,AzureChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import sys
import re
import argparse
import os
print(sys.path)
sys.p... | [
"langchain.prompts.chat.ChatPromptTemplate",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.PromptTemplate",
"langchain.schema.SystemMessage",
"langchain.LLMChain",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((315, 335), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (330, 335), False, 'import sys\n'), ((3893, 3960), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['essay']"}), "(template=prompt_template, input_variables=['essay'])\n", (3907, 3960... |