Tuesday, May 21, 2024

Phi-3 Vision Model - Step by Step Installation

This video is a step-by-step tutorial on locally installing Microsoft's Phi-3-vision-128k-instruct model and then chatting with your images.



Code:

pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers

pip install flash_attn==2.5.8
pip install numpy==1.24.4
pip install Pillow==10.3.0
pip install Requests==2.31.0
pip install torch==2.3.0
pip install torchvision==0.18.0
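
Optional: before loading anything, a quick sanity check confirms that the pinned torch build can actually see your GPU, since the script below places the model on CUDA:

import torch
print(torch.__version__)              # should report 2.3.0
print(torch.cuda.is_available())      # must be True for device_map="cuda"
print(torch.cuda.get_device_name(0))  # the GPU the model will load onto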

from PIL import Image
import requests
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3-vision-128k-instruct"

model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda", trust_remote_code=True, torch_dtype="auto")

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
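
If flash_attn won't build or your GPU doesn't support it, the model's Hugging Face card documents an eager-attention fallback; loading the model with one extra argument avoids the flash-attention path:

# Fallback sketch: per the model card, _attn_implementation="eager" disables flash attention.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda", trust_remote_code=True, torch_dtype="auto", _attn_implementation="eager")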

prompt="What is shown in this image?"

messages = [
    {"role": "user", "content": "<|image_1|>\nWhat is shown in this image?"},
    {"role": "assistant", "content": ""}
]

url = "https://assets-c4akfrf5b4d3f4b7.z01.azurefd.net/assets/2024/04/BMDataViz_661fb89f3845e.png"

# Load a local image ...
image = Image.open('4a.png').convert('RGB')
# ... or stream the sample chart from the URL above instead:
# image = Image.open(requests.get(url, stream=True).raw)

# Render the chat messages into Phi-3's prompt format, ending with the assistant turn.
prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Tokenize the prompt, preprocess the image, and move both to the GPU.
inputs = processor(prompt, [image], return_tensors="pt").to("cuda:0")

generation_args = {
    "max_new_tokens": 500,
    "temperature": 0.0,
    "do_sample": False,
}
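
These arguments force deterministic greedy decoding (with do_sample=False, the temperature value is ignored). If you'd rather sample for more varied answers, a configuration along these lines could replace the block above; the values here are illustrative, not tuned:

# Alternative (illustrative values): enable sampling for more varied output.
generation_args = {
    "max_new_tokens": 500,
    "do_sample": True,
    "temperature": 0.7,
    "top_p": 0.9,
}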

generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)

# Strip the prompt tokens so only the newly generated answer is decoded.
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

print(response)
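
Since the chat history lives in messages, you can keep talking to the same image by appending the model's reply plus a follow-up question and generating again. A minimal sketch (the follow-up question is just an example):

messages.append({"role": "assistant", "content": response})
messages.append({"role": "user", "content": "Summarize the image in one sentence."})  # example follow-up

prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(prompt, [image], return_tensors="pt").to("cuda:0")
generate_ids = model.generate(**inputs, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
print(processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])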
