Install LibreChat Locally with Ollama to Chat and RAG with Any Model

This video shows how to install LibreChat locally and integrate it with Ollama. You can use Llama 3, Mistral, or any other model to chat privately and locally through a web GUI.
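
Before running the commands below, make sure git, conda, and Docker with the compose plugin are available (these are the only tools this walkthrough assumes). A quick sanity check:

git --version
conda --version
docker compose version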


Code:



conda create -n librechat python=3.11
conda activate librechat

git clone https://github.com/danny-avila/LibreChat.git
cd LibreChat

cp .env.example .env
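
For a basic local setup the defaults in .env are usually fine. If you want to confirm the port LibreChat will listen on (assuming the example file defines a PORT entry, as recent releases do), you can check it with:

grep -n "PORT" .env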

sudo systemctl stop ollama.service
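
Stopping the host Ollama service frees port 11434 so the dockerized Ollama added in the override file below can bind it. You can confirm nothing is still listening on that port:

# should print the "free" message once the host service is stopped
ss -ltn | grep 11434 || echo "port 11434 is free"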

docker compose up -d

docker compose ps --format "{{.Service}} {{.State}}"
 
http://localhost:3080/
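
Once the containers report a running state, the web GUI should answer on port 3080. A quick reachability check from the terminal (expect an HTTP 200 once startup has finished):

curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3080/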

================

librechat.yaml

    - name: "Ollama"
      apiKey: "ollama"
      # use 'host.docker.internal' instead of localhost if running LibreChat in a docker container
      baseURL: "http://host.docker.internal:11434/v1/"
      models:
        default: [
          "mistral"
          ]
        # fetching the list of models is supported, but the `name` field must start
        # with `ollama` (case-insensitive), as it does in this example
        fetch: false
      titleConvo: true
      titleModel: "mistral"
      summarize: false
      summaryModel: "mistral"
      forcePrompt: false
      modelDisplayLabel: "Ollama"
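
This block is only the endpoint entry itself; in the actual librechat.yaml it sits under a top-level endpoints: custom: list, alongside a version key. A minimal sketch of the full file, written from the LibreChat folder (the version value here is an assumption, adjust it to whatever your LibreChat release expects):

cat > librechat.yaml <<'EOF'
# version is an assumption; check the config schema version your LibreChat release expects
version: 1.0.5
cache: true
endpoints:
  custom:
    - name: "Ollama"
      apiKey: "ollama"
      baseURL: "http://host.docker.internal:11434/v1/"
      models:
        default: ["mistral"]
        fetch: false
      titleConvo: true
      titleModel: "mistral"
      modelDisplayLabel: "Ollama"
EOF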


==================
docker-compose.override.yml file

services:

  # USE LIBRECHAT CONFIG FILE
  api:
    volumes:
      - type: bind
        source: ./librechat.yaml
        target: /app/librechat.yaml

  # ADD OLLAMA
  ollama:
    image: ollama/ollama:latest
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [compute, utility]
    ports:
      - "11434:11434"
    volumes:
      - ./ollama:/root/.ollama
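
After both files are in place, recreate the stack so the override (the bind-mounted config plus the ollama service) takes effect, then pull the model referenced in librechat.yaml into the dockerized Ollama. The service names below (api, ollama) match the override above:

docker compose up -d
docker compose exec ollama ollama pull mistral

# optional: confirm LibreChat sees the bind-mounted config
docker compose exec api cat /app/librechat.yaml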
