Batch jobs
Everything All at Once
Example
with DedicatedLLM(client=cortecs, model_name='<MODEL_NAME>') as llm:
chain = ... | llm
summaries = chain.batch([{...} for doc in docs])

from langchain_community.document_loaders import ArxivLoader
from langchain_core.prompts import ChatPromptTemplate
from cortecs_py.client import Cortecs
from cortecs_py.integrations.langchain import DedicatedLLM
cortecs = Cortecs()

# Fetch up to 40 arXiv papers matching the query. Each document is truncated
# to 25k characters (~6.25k tokens) -- make sure the model supports that
# context length.
loader = ArxivLoader(
    query="reasoning",
    load_max_docs=40,
    get_full_documents=True,  # fixed typo: was `get_ful_documents`, which ArxivLoader does not recognize
    doc_content_chars_max=25000,
    load_all_available_meta=False,
)

prompt = ChatPromptTemplate.from_template("{text}\n\n Explain to me like I'm five:")
docs = loader.load()

# DedicatedLLM provisions a dedicated model instance for the duration of the
# `with` block and tears it down on exit, so we only pay for the batch run.
with DedicatedLLM(client=cortecs, model_name='cortecs/phi-4-FP8-Dynamic') as llm:
    chain = prompt | llm

    print("Processing data batch-wise ...")
    # Run all documents through the chain concurrently via Runnable.batch.
    summaries = chain.batch([{"text": doc.page_content} for doc in docs])

    for summary in summaries:
        print(summary.content + '-------\n\n\n')
Last updated