Monday, June 5, 2023

Source Code for Creating an LLM on Your Laptop

This is a collection of open-source Python code snippets that you can use to download, fine-tune, and generate content with an LLM on your laptop. The fine-tuning script comes first, followed by a standalone generation script:
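Before anything else, get the pretrained checkpoint onto disk. The snippets below assume the model files already exist locally, so here is a minimal download sketch; the base URL and file list are placeholders for whichever open-source checkpoint you choose:

import os
import requests

# Placeholders: point these at the open-source checkpoint you are using.
BASE_URL = "https://example.com/models/my-llm"
FILES = [
    "hyperparams.json",
    "model.ckpt.index",
    "model.ckpt.data-00000-of-00001",
    "vocab.model",
]

os.makedirs("model", exist_ok=True)
for name in FILES:
    response = requests.get(f"{BASE_URL}/{name}", timeout=60)
    response.raise_for_status()
    with open(os.path.join("model", name), "wb") as f:
        f.write(response.content)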

import tensorflow as tf
import numpy as np
import os
import json
import random
import time
import argparse
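The fine-tuning snippets below read several command-line arguments that are never defined in the post. A minimal parser might look like this; the argument names are inferred from the code that follows, and steps_per_epoch is my own addition to bound each epoch, since the input pipeline repeats forever:

# Command-line arguments for fine-tuning (names inferred from the code
# below; steps_per_epoch is an assumption used to bound each epoch).
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
parser.add_argument("--dataset_path", type=str, required=True)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument("--steps_per_epoch", type=int, default=1000)
args = parser.parse_args()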

# Load the hyperparameters and build the Estimator used for fine-tuning.
# model_fn builds the model graph; a skeleton of it is sketched below.
with open(os.path.join(args.model_path, "hyperparams.json"), "r") as f:
    hyperparams = json.load(f)

model = tf.compat.v1.estimator.Estimator(
    model_fn=model_fn,
    model_dir=args.output_path,
    params=hyperparams,
    config=tf.compat.v1.estimator.RunConfig(
        save_checkpoints_steps=5000,
        keep_checkpoint_max=10,
        save_summary_steps=5000,
    ),
)
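The snippets never define model_fn itself. As a rough sketch of the contract the Estimator expects, assuming a hypothetical build_model that turns the batched inputs into per-token logits of shape [batch, time, n_vocab]:

def model_fn(features, labels, mode, params):
    # build_model is a placeholder for the actual network construction
    # (including turning the raw text features into token ids).
    logits = build_model(features, params)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={"logits": logits})

    # For language modelling the labels are the inputs shifted by one token.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits))
    optimizer = tf.compat.v1.train.AdamOptimizer(params.get("learning_rate", 1e-4))
    train_op = optimizer.minimize(
        loss, global_step=tf.compat.v1.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)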

# Define the input function for the dataset

def input_fn(mode):
    # Stream the training text line by line, truncate each line to the
    # model's context window, then shuffle and batch.
    dataset = tf.data.TextLineDataset(args.dataset_path)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.map(lambda x: tf.strings.substr(x, 0, hyperparams["n_ctx"]))
    dataset = dataset.batch(args.batch_size)
    # make_one_shot_iterator lives under tf.compat.v1.data in TF 2.x.
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    return iterator.get_next()
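To sanity-check the pipeline before training, you can iterate the dataset directly; a quick sketch, assuming TF 2.x eager execution and a small text file at args.dataset_path:

# Peek at one batch of the pipeline (eager mode only).
sample = (
    tf.data.TextLineDataset(args.dataset_path)
    .map(lambda x: tf.strings.substr(x, 0, 64))
    .batch(2)
    .take(1)
)
for batch in sample:
    print(batch.numpy())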


# Define the training function

def train():
    for epoch in range(args.epochs):
        # The input pipeline repeats forever, so bound each "epoch" by a
        # fixed number of steps (steps_per_epoch, defined above).
        model.train(
            input_fn=lambda: input_fn(tf.estimator.ModeKeys.TRAIN),
            steps=args.steps_per_epoch,
        )
        print(f"Epoch {epoch + 1} completed.")

# Start the training

train()

# Define the command-line arguments for the generation script

parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, required=True)
parser.add_argument("--length", type=int, default=110)
parser.add_argument("--temperature", type=float, default=0.8)
args = parser.parse_args()

# Load the fine-tuned model

# The same model_fn used for training is reused here; the Estimator calls it
# with ModeKeys.PREDICT itself, so it must not be invoked (or overwritten) by hand.
with open(os.path.join(args.model_path, "hyperparams.json"), "r") as f:
    hyperparams = json.load(f)

model = tf.compat.v1.estimator.Estimator(
    model_fn=model_fn,
    model_dir=args.model_path,
    params=hyperparams,
)
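The generation function below relies on a tokenizer that the snippets never construct. Its convert_tokens_to_ids / convert_ids_to_tokens calls and the "▁" pieces it strips suggest a SentencePiece-style tokenizer behind the Hugging Face API, so one plausible way to load it, assuming the tokenizer files ship alongside the model, is:

# Assumption: the tokenizer files live in the model directory and are
# compatible with the Hugging Face tokenizers API.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(args.model_path)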

# Define the generation function

def generate_text(length, temperature):
    start_token = "<|startoftext|>"
    tokens = tokenizer.convert_tokens_to_ids([start_token])
    while len(tokens) < length:
        # Condition on at most the last n_ctx tokens.
        context = np.array(tokens[-hyperparams["n_ctx"]:], dtype=np.int32)

        def predict_input_fn():
            # Estimator.predict expects an input_fn that returns a dataset
            # (or tensors), not a raw Python list.
            return tf.data.Dataset.from_tensors(context[np.newaxis, :])

        # Note: calling predict() once per token rebuilds the graph every
        # step; slow, but it keeps the snippet simple.
        output = next(model.predict(input_fn=predict_input_fn))["logits"]
        # Sample the next token from a temperature-scaled softmax over
        # the logits at the last position.
        logits = output[-1] / temperature
        logits = logits - np.max(logits)
        probs = np.exp(logits) / np.sum(np.exp(logits))
        token = np.random.choice(hyperparams["n_vocab"], p=probs)
        tokens.append(int(token))
    # Decode once at the end instead of on every iteration.
    output_text = "".join(tokenizer.convert_ids_to_tokens(tokens))
    output_text = output_text.replace("▁", " ").replace(start_token, "")
    return output_text

# Generate text

text = generate_text(args.length, args.temperature)
print(text)
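Temperature controls how sharp the sampling distribution is: values below 1 concentrate probability on the top tokens, values above 1 flatten it. A toy example of the scaling used in generate_text:

# How temperature reshapes a softmax over three toy logits.
logits = np.array([2.0, 1.0, 0.1])
for t in (0.5, 1.0, 2.0):
    scaled = logits / t
    probs = np.exp(scaled - scaled.max())
    probs /= probs.sum()
    print(t, probs.round(3))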
