Friday, July 7, 2023

LLM Caching Using LangChain - Step by Step Tutorial

 



! pip install openai

! pip install langchain

! pip install yt_dlp

! pip install pydub


# --- Imports (stdlib, then third-party) -------------------------------------
import os
import sys
import time

import langchain
import openai
from dotenv import find_dotenv, load_dotenv
from langchain.cache import InMemoryCache
from langchain.callbacks import get_openai_callback
from langchain.document_loaders import WebBaseLoader
from langchain.llms import OpenAI

# Extend the import path two levels up — TODO confirm why '../..' is needed
# (presumably so project-local helper modules resolve from this notebook).
sys.path.append('../..')

# --- Configuration ----------------------------------------------------------
# Load settings (including OPENAI_API_KEY) from a local .env file.
_ = load_dotenv(find_dotenv())

# SECURITY: read the API key from the environment instead of hard-coding a
# secret string in source control.
openai.api_key = os.environ.get('OPENAI_API_KEY')

# Install an in-process LLM cache: identical prompts after the first call are
# served from memory instead of making a new API round-trip.
langchain.llm_cache = InMemoryCache()


# Fetch and parse a single web page, reporting how long the load itself takes.
loader = WebBaseLoader("https://www.fahdmirza.com/2023/07/installing-draggan-on-linux-locally.html")

print("------------------------------------------------")

# Time ONLY loader.load(): the original also timed the print statements,
# which inflated the reported duration.
start_time = time.time()
docs = loader.load()
end_time = time.time()

# Preview the first 100 characters of the page plus its metadata.
print(docs[0].page_content[:100])
print(docs[0].metadata)

print(f"Time Taken:{end_time-start_time:0.2f} seconds")

print("------------------------------------------------")


No comments: