Tuesday, April 30, 2024

Easiest Way to Implement RAG with an AI Model

This video is a simple hands-on tutorial showing how to implement RAG locally with your own files using the Cohere Command R+ model.
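The script reads the Cohere API key from the COHERE_API_KEY environment variable, so the SDK must be installed and the key set before running. As a minimal pre-flight sketch (not part of the original tutorial):

import os

# The main script below assumes this environment variable is already set.
if not os.getenv('COHERE_API_KEY'):
    raise SystemExit("Set the COHERE_API_KEY environment variable first.")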




Code:


# pip install cohere

import cohere
import os

# Read the API key from the environment and create the Cohere client
api_key = os.getenv('COHERE_API_KEY')
co = cohere.Client(api_key)

def read_file(filename):
    # Return the contents of a local text file, or None if it does not exist
    try:
        with open(filename, 'r') as file:
            content = file.read()
        return content
    except FileNotFoundError:
        print(f"{filename} not found in the current directory.")
        return None
   
def generate_rag_response_with_citations(query, documents):
    # Ground the chat request on the supplied documents so the answer
    # comes back with citations; use "command-r-plus" here for Command R+
    response = co.chat(
        model="command-r",
        message=query,
        documents=documents
    )

    # Pull the generated answer and the citations that map spans of the
    # answer back to the supplied documents
    response_text = response.text
    citations = response.citations

    return response_text, citations

# Load the two local text files that will ground the model's answer
filename1 = 'file1.txt'
file_content1 = read_file(filename1)

filename2 = 'file2.txt'
file_content2 = read_file(filename2)


# Each document is a dict of string fields the model can draw on and cite
documents = [
    {"title": "Whois", "text": file_content1},
    {"title": "Location", "text": file_content2}
]

#query = "Where Fahd Mirza lives?"
query = "Who is Fahd Mirza?"

text, citations = generate_rag_response_with_citations(query, documents)
print(text)
print(citations)
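
For a quick end-to-end run, file1.txt and file2.txt just need to exist alongside the script. A minimal sketch to create them (the contents here are made-up placeholders, not taken from the video):

# Create placeholder source files for a quick test; the text is invented example data
with open('file1.txt', 'w') as f:
    f.write("Fahd Mirza is a technology blogger and YouTuber who covers AI topics.")

with open('file2.txt', 'w') as f:
    f.write("Fahd Mirza is based in Australia.")

With the files in place, the script prints the grounded answer first, then the citations, each of which ties part of the answer back to one of the supplied documents.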
