This video is a step-by-step tutorial on installing BeyondLLM locally with Ollama. BeyondLLM offers an all-in-one toolkit for the experimentation, evaluation, and deployment of Retrieval-Augmented Generation (RAG) systems.
Code:
conda create -n beyondllm python=3.11
conda activate beyondllm
pip install beyondllm
pip install llama-index-embeddings-huggingface
pip install ollama
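Before running any of the Python code below, make sure Ollama itself is installed and running locally, and pull the model used later in this tutorial (mistral here). If Ollama is not already running as a background service, start it with ollama serve in a separate terminal.
ollama pull mistral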
# Load and chunk the source document
from beyondllm.source import fit
data = fit(path="mypdf.pdf", dtype="pdf")
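The fit function also exposes chunking controls in the BeyondLLM documentation. The chunk_size and chunk_overlap values below are illustrative only; adjust them to your document and confirm the parameters exist in the version you installed:
# Sketch: chunking parameters per the BeyondLLM docs, values illustrative
data = fit(path="mypdf.pdf", dtype="pdf", chunk_size=512, chunk_overlap=100)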
# Local embedding model pulled from Hugging Face
from beyondllm.embeddings import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="BAAI/bge-small-en-v1.5")
# Build a retriever over the embedded chunks
from beyondllm.retrieve import auto_retriever
retriever = auto_retriever(
    data=data,
    embed_model=embed_model,
    type="normal",
    top_k=5,
)
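You can sanity-check retrieval before wiring in the LLM. This is a minimal sketch assuming the object returned by auto_retriever exposes a retrieve method, as shown in the BeyondLLM docs; verify it against your installed version:
# Assumption: retriever.retrieve(query) returns the top_k matching chunks
nodes = retriever.retrieve("who is fahd mirza?")
for node in nodes:
    print(node)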
# Connect to the locally running Ollama model
from beyondllm.llms import OllamaModel
llm = OllamaModel(model="mistral")
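To confirm the Ollama connection independently of retrieval, you can call the model directly. The predict method is what BeyondLLM's LLM wrappers use elsewhere in its documentation, but treat the exact method name as an assumption to verify:
# Assumption: OllamaModel exposes predict() for a plain completion
print(llm.predict("Reply with one short sentence."))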
user_prompt = "who is fahd mirza?"
system_prompt = "You are an AI assistant...."
# Assemble the RAG pipeline and generate an answer
from beyondllm import generator
pipeline = generator.Generate(question=user_prompt, system_prompt=system_prompt, llm=llm, retriever=retriever)
print(pipeline.call())
# RAG triad evaluation: context relevancy, answer relevancy, groundedness
print(pipeline.get_context_relevancy())
print(pipeline.get_answer_relevancy())
print(pipeline.get_groundedness())
print(pipeline.get_rag_triad_evals())  # all three metrics in one call
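Once the data, embeddings, retriever, and LLM are set up, only the Generate step needs to change per question. The helper below is a small sketch that reuses only the calls shown above; ask_pdf is a hypothetical name, not part of BeyondLLM:
# Hypothetical helper: reuse the retriever and llm across questions
def ask_pdf(question):
    pipe = generator.Generate(question=question, system_prompt=system_prompt, llm=llm, retriever=retriever)
    return pipe.call()

print(ask_pdf("Summarize the document in two sentences."))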