Integrations

Dynamic provisioning out-of-the-box

LangChain

You can use the with statement to work with a dedicated LLM. The DedicatedLLM class automatically shuts down your instance as soon as the with block is left.

from cortecs_py import Cortecs
from cortecs_py.integrations import DedicatedLLM

# Client used to provision and manage dedicated instances.
cortecs = Cortecs()

# DedicatedLLM provisions a dedicated instance for the given model on entry
# and automatically shuts it down when the with block is left.
with DedicatedLLM(cortecs, 'neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8') as llm:
    essay = llm.invoke('Write an essay about dynamic provisioning')
    print(essay.content)  # the model's text response

CrewAI

Install crewAI

pip install crewai crewai-tools

Using DedicatedCrewBase in combination with DedicatedCrew allows you to easily run dynamically provisioned crews. For more information see the crew docs.

from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

from cortecs_py.integrations import DedicatedCrew, DedicatedCrewBase

@DedicatedCrewBase
class ExampleCrew():
	"""Example crew whose underlying instance is dynamically provisioned.

	Agents and tasks are declared via crewAI's decorator API and configured
	from the class's ``agents_config`` / ``tasks_config`` mappings.
	"""

	@agent
	def researcher(self) -> Agent:
		"""Agent that performs the research step."""
		# Custom tools (e.g. tools=[MyCustomTool()]) can be added here.
		return Agent(config=self.agents_config['researcher'], verbose=True)

	@agent
	def reporting_analyst(self) -> Agent:
		"""Agent that turns research results into a report."""
		return Agent(config=self.agents_config['reporting_analyst'], verbose=True)

	@task
	def research_task(self) -> Task:
		"""Task executed by the researcher agent."""
		return Task(config=self.tasks_config['research_task'])

	@task
	def reporting_task(self) -> Task:
		"""Task that writes the final report to report.md."""
		return Task(config=self.tasks_config['reporting_task'], output_file='report.md')

	@crew
	def crew(self) -> Crew:
		"""Assemble a sequential DedicatedCrew from the agents and tasks above."""
		return DedicatedCrew(
			instance_id=self.instance_id,
			client=self.client,
			agents=self.agents,
			tasks=self.tasks,
			process=Process.sequential,
		)

Last updated