Friday, July 12, 2024

Install LightRAG Locally - Modular RAG and Lightning Library for LLM Apps

This video is a step-by-step, easy-to-follow tutorial on installing LightRAG, a library that helps developers both build and optimize Retriever-Agent-Generator pipelines. It is light, modular, and robust, with a 100% readable codebase.


Code:

conda create -n lightrag python=3.11 -y && conda activate lightrag

git clone https://github.com/SylphAI-Inc/LightRAG.git && cd LightRAG

pip install lightrag
pip install openai==1.12.0
pip install faiss-cpu==1.8.0
pip install sqlalchemy==2.0.30
pip install pgvector==0.2.5
pip install groq==0.5.0

mv .env_example .env   # set your OpenAI and Groq API keys in the .env file
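
After renaming, open .env and fill in your keys. A minimal sketch of what the file should look like (the variable names below follow the repository's .env_example and are assumptions, so check that file for the exact names):

OPENAI_API_KEY=your_openai_api_key_here
GROQ_API_KEY=your_groq_api_key_here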


conda install jupyter -y
pip uninstall charset_normalizer -y
pip install charset_normalizer   # reinstall to work around a common charset_normalizer import error in Jupyter
jupyter notebook

from lightrag.utils import setup_env
setup_env()  # loads the API keys from the .env file into the environment
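
Optionally, verify that the keys were picked up. A quick sanity-check sketch, assuming setup_env() loads the .env file into process environment variables and that the variable names match .env_example:

import os

# Both variable names are assumptions based on .env_example:
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY not found in environment"
assert os.getenv("GROQ_API_KEY"), "GROQ_API_KEY not found in environment"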



from dataclasses import dataclass, field

from lightrag.core import Component, Generator, DataClass
from lightrag.components.model_client import GroqAPIClient
from lightrag.components.output_parsers import JsonOutputParser

@dataclass
class QAOutput(DataClass):
    explanation: str = field(
        metadata={"desc": "A brief explanation of the concept in one sentence."}
    )
    example: str = field(metadata={"desc": "An example of the concept in a sentence."})
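
To see what the JSON parser will try to produce, here is a hypothetical instance of QAOutput with purely illustrative values:

# Illustrative only; the model fills in these two fields at runtime.
sample = QAOutput(
    explanation="An LLM is a neural network trained on large amounts of text to understand and generate language.",
    example="ChatGPT is powered by an LLM that answers questions in natural language.",
)
print(sample)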



qa_template = r"""<SYS>
You are a helpful assistant.
<OUTPUT_FORMAT>
{{output_format_str}}
</OUTPUT_FORMAT>
</SYS>
User: {{input_str}}
You:"""

class QA(Component):
    def __init__(self):
        super().__init__()

        parser = JsonOutputParser(data_class=QAOutput, return_data_class=True)
        self.generator = Generator(
            model_client=GroqAPIClient(),
            model_kwargs={"model": "llama3-8b-8192"},
            template=qa_template,
            prompt_kwargs={"output_format_str": parser.format_instructions()},
            output_processors=parser,
        )

    def call(self, query: str):
        return self.generator.call({"input_str": query})

    async def acall(self, query: str):
        return await self.generator.acall({"input_str": query})
       


qa = QA()
print(qa)  # prints the pipeline's component structure

# call the pipeline synchronously
output = qa("What is LLM?")
print(output)


qa.generator.print_prompt(
    output_format_str=qa.generator.output_processors.format_instructions(),
    input_str="What is LLM?",
)
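
The class also exposes acall for asynchronous use. A minimal sketch for a plain Python script (inside a Jupyter notebook, which already runs an event loop, you could instead just await qa.acall(...) directly in a cell):

import asyncio

async def main():
    output = await qa.acall("What is LLM?")
    print(output)

asyncio.run(main())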
