Optimizers

Module for prompt optimizers.

get_optimizer(config=None, optimizer=None, meta_prompt=None, task_description=None, *args, **kwargs)

Factory function to create and return an optimizer instance based on the provided configuration.

This function selects and instantiates the appropriate optimizer class based on the 'optimizer' field of the config object; alternatively, the relevant parameters can be passed directly. It supports four optimizer types: 'dummy', 'evopromptde', 'evopromptga', and 'opro'.

Parameters:

- config (Config): Configuration object containing the optimizer type. Defaults to None.
- optimizer (str): Identifier for the optimizer to use: "dummy" for DummyOptimizer, or any other supported identifier for the corresponding optimizer class. Defaults to None.
- meta_prompt (str): Meta prompt for the optimizer. Defaults to None.
- task_description (str): Task description for the optimizer. Defaults to None.
- *args: Variable length argument list passed to the optimizer constructor.
- **kwargs: Arbitrary keyword arguments passed to the optimizer constructor.

Returns:

- An instance of the specified optimizer class.

Raises:

- ValueError: If an unknown optimizer type is specified.

Source code in promptolution/optimizers/__init__.py
def get_optimizer(
    config=None, optimizer: str = None, meta_prompt: str = None, task_description: str = None, *args, **kwargs
):
    """Factory function to create and return an optimizer instance based on the provided configuration.

    This function selects and instantiates the appropriate optimizer class based on the
    'optimizer' field in the config object. Alternatively you can pass the relevant parameters.
    It supports four types of optimizers: 'dummy', 'evopromptde', 'evopromptga', and 'opro'.

    Args:
        config (Config): Configuration object containing the optimizer type.
        optimizer (str): Identifier for the optimizer to use. Special cases:
                         - "dummy" for DummyOptimizer
                         - Any other string for the specified optimizer class
        meta_prompt (str): Meta prompt for the optimizer.
        task_description (str): Task description for the optimizer.
        *args: Variable length argument list passed to the optimizer constructor.
        **kwargs: Arbitrary keyword arguments passed to the optimizer constructor.

    Returns:
        An instance of the specified optimizer class.

    Raises:
        ValueError: If an unknown optimizer type is specified in the config.
    """
    if optimizer is None and config is not None:
        optimizer = config.optimizer

    if task_description is None and config is not None:
        task_description = config.task_description

    if meta_prompt is None and config is not None:
        meta_prompt = config.meta_prompt

    if optimizer == "dummy":
        return DummyOptimizer(*args, **kwargs)
    if optimizer == "evopromptde":
        if task_description is not None:
            return EvoPromptDE(
                prompt_template=EVOPROMPT_DE_TEMPLATE_TD.replace("<task_desc>", task_description), *args, **kwargs
            )
        return EvoPromptDE(prompt_template=EVOPROMPT_DE_TEMPLATE, *args, **kwargs)
    if optimizer == "evopromptga":
        if task_description is not None:
            return EvoPromptGA(
                prompt_template=EVOPROMPT_GA_TEMPLATE_TD.replace("<task_desc>", task_description), *args, **kwargs
            )
        return EvoPromptGA(prompt_template=EVOPROMPT_GA_TEMPLATE, *args, **kwargs)
    if optimizer == "opro":
        if task_description is not None:
            return Opro(prompt_template=OPRO_TEMPLATE_TD.replace("<task_desc>", task_description), *args, **kwargs)
        return Opro(prompt_template=OPRO_TEMPLATE, *args, **kwargs)
    raise ValueError(f"Unknown optimizer: {optimizer}")
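
A minimal usage sketch of the factory, passing parameters directly instead of a Config object. The task, predictor, meta_llm, and initial_prompts objects are placeholders assumed to implement the BaseTask, BasePredictor, and BaseLLM interfaces used by the optimizers below; the task description string is likewise made up for illustration.

# Hypothetical usage sketch: task, predictor, meta_llm, and initial_prompts are
# assumed to be constructed elsewhere with the interfaces shown on this page.
from promptolution.optimizers import get_optimizer

optimizer = get_optimizer(
    optimizer="evopromptga",  # selects EvoPromptGA
    task_description="Classify the sentiment of a tweet as positive or negative.",
    # the remaining keyword arguments are forwarded to the EvoPromptGA constructor
    meta_llm=meta_llm,
    initial_prompts=initial_prompts,
    task=task,
    predictor=predictor,
)
best_prompts = optimizer.optimize(n_steps=10)
print(best_prompts[0])  # EvoPromptGA keeps its population sorted, best prompt first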

base_optimizer

Base class for prompt optimizers.

BaseOptimizer

Bases: ABC

Abstract base class for prompt optimizers.

This class defines the basic structure and interface for prompt optimization algorithms. Concrete optimizer implementations should inherit from this class and implement the optimize method.

Attributes:

- prompts (List[str]): List of current prompts being optimized.
- task (BaseTask): The task object used for evaluating prompts.
- callbacks (List[Callable]): List of callback functions to be called during optimization.
- predictor: The predictor used for prompt evaluation (if applicable).

Parameters:

- initial_prompts (List[str]): Initial set of prompts to start optimization with. Required.
- task (BaseTask): Task object for prompt evaluation. Required.
- callbacks (List[Callable]): List of callback functions. Defaults to an empty list.
- predictor (optional): Predictor for prompt evaluation. Defaults to None.
- verbosity (int): Verbosity level for logging during optimization. Defaults to 0.
Source code in promptolution/optimizers/base_optimizer.py
class BaseOptimizer(ABC):
    """Abstract base class for prompt optimizers.

    This class defines the basic structure and interface for prompt optimization algorithms.
    Concrete optimizer implementations should inherit from this class and implement
    the `optimize` method.

    Attributes:
        prompts (List[str]): List of current prompts being optimized.
        task (BaseTask): The task object used for evaluating prompts.
        callbacks (List[Callable]): List of callback functions to be called during optimization.
        predictor: The predictor used for prompt evaluation (if applicable).

    Args:
        initial_prompts (List[str]): Initial set of prompts to start optimization with.
        task (BaseTask): Task object for prompt evaluation.
        callbacks (List[Callable], optional): List of callback functions. Defaults to an empty list.
        predictor (optional): Predictor for prompt evaluation. Defaults to None.
    """

    def __init__(
        self,
        initial_prompts: list[str],
        task: BaseTask,
        callbacks: list[Callable] = [],
        predictor: BasePredictor = None,
        verbosity: int = 0,
    ):
        """Initialize the BaseOptimizer."""
        self.prompts = initial_prompts
        self.task = task
        self.callbacks = callbacks
        self.predictor = predictor
        self.verbosity = verbosity

    @abstractmethod
    def optimize(self, n_steps: int) -> List[str]:
        """Abstract method to perform the optimization process.

        This method should be implemented by concrete optimizer classes to define
        the specific optimization algorithm.

        Args:
            n_steps (int): Number of optimization steps to perform.

        Returns:
            List[str]: The optimized list of prompts after all steps.

        Raises:
            NotImplementedError: If not implemented by a concrete class.
        """
        raise NotImplementedError

    def _on_step_end(self):
        """Call all registered callbacks at the end of each optimization step."""
        continue_optimization = True
        for callback in self.callbacks:
            continue_optimization &= callback.on_step_end(self)  # if any callback returns False, end the optimization

        return continue_optimization

    def _on_epoch_end(self):
        """Call all registered callbacks at the end of each optimization epoch."""
        continue_optimization = True
        for callback in self.callbacks:
            continue_optimization &= callback.on_epoch_end(self)  # if any callback returns False, end the optimization

        return continue_optimization

    def _on_train_end(self):
        """Call all registered callbacks at the end of the entire optimization process."""
        for callback in self.callbacks:
            callback.on_train_end(self)

__init__(initial_prompts, task, callbacks=[], predictor=None, verbosity=0)

Initialize the BaseOptimizer.

Source code in promptolution/optimizers/base_optimizer.py
def __init__(
    self,
    initial_prompts: list[str],
    task: BaseTask,
    callbacks: list[Callable] = [],
    predictor: BasePredictor = None,
    verbosity: int = 0,
):
    """Initialize the BaseOptimizer."""
    self.prompts = initial_prompts
    self.task = task
    self.callbacks = callbacks
    self.predictor = predictor
    self.verbosity = verbosity

optimize(n_steps) abstractmethod

Abstract method to perform the optimization process.

This method should be implemented by concrete optimizer classes to define the specific optimization algorithm.

Parameters:

- n_steps (int): Number of optimization steps to perform. Required.

Returns:

- List[str]: The optimized list of prompts after all steps.

Raises:

- NotImplementedError: If not implemented by a concrete class.

Source code in promptolution/optimizers/base_optimizer.py
@abstractmethod
def optimize(self, n_steps: int) -> List[str]:
    """Abstract method to perform the optimization process.

    This method should be implemented by concrete optimizer classes to define
    the specific optimization algorithm.

    Args:
        n_steps (int): Number of optimization steps to perform.

    Returns:
        List[str]: The optimized list of prompts after all steps.

    Raises:
        NotImplementedError: If not implemented by a concrete class.
    """
    raise NotImplementedError

DummyOptimizer

Bases: BaseOptimizer

A dummy optimizer that doesn't perform any actual optimization.

This optimizer simply returns the initial prompts without modification. It's useful for testing or as a baseline comparison.

Attributes:

- prompts (List[str]): List of prompts (unchanged from initialization).
- callbacks (List[Callable]): Empty list of callbacks.

Parameters:

- initial_prompts (List[str]): Initial set of prompts. Required.
- *args: Variable length argument list (unused).
- **kwargs: Arbitrary keyword arguments (unused).
Source code in promptolution/optimizers/base_optimizer.py
class DummyOptimizer(BaseOptimizer):
    """A dummy optimizer that doesn't perform any actual optimization.

    This optimizer simply returns the initial prompts without modification.
    It's useful for testing or as a baseline comparison.

    Attributes:
        prompts (List[str]): List of prompts (unchanged from initialization).
        callbacks (List[Callable]): Empty list of callbacks.

    Args:
        initial_prompts (List[str]): Initial set of prompts.
        *args: Variable length argument list (unused).
        **kwargs: Arbitrary keyword arguments (unused).
    """

    def __init__(self, initial_prompts, *args, **kwargs):
        """Initialize the DummyOptimizer."""
        self.callbacks = []
        self.prompts = initial_prompts

    def optimize(self, n_steps) -> list[str]:
        """Simulate an optimization process without actually modifying the prompts.

        This method calls the callback methods to simulate a complete optimization
        cycle, but returns the initial prompts unchanged.

        Args:
            n_steps (int): Number of optimization steps (unused in this implementation).

        Returns:
            List[str]: The original list of prompts, unchanged.
        """
        self._on_step_end()
        self._on_epoch_end()
        self._on_train_end()

        return self.prompts

__init__(initial_prompts, *args, **kwargs)

Initialize the DummyOptimizer.

Source code in promptolution/optimizers/base_optimizer.py
def __init__(self, initial_prompts, *args, **kwargs):
    """Initialize the DummyOptimizer."""
    self.callbacks = []
    self.prompts = initial_prompts

optimize(n_steps)

Simulate an optimization process without actually modifying the prompts.

This method calls the callback methods to simulate a complete optimization cycle, but returns the initial prompts unchanged.

Parameters:

- n_steps (int): Number of optimization steps (unused in this implementation). Required.

Returns:

- list[str]: The original list of prompts, unchanged.

Source code in promptolution/optimizers/base_optimizer.py
def optimize(self, n_steps) -> list[str]:
    """Simulate an optimization process without actually modifying the prompts.

    This method calls the callback methods to simulate a complete optimization
    cycle, but returns the initial prompts unchanged.

    Args:
        n_steps (int): Number of optimization steps (unused in this implementation).

    Returns:
        List[str]: The original list of prompts, unchanged.
    """
    self._on_step_end()
    self._on_epoch_end()
    self._on_train_end()

    return self.prompts
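
A short sketch of the no-op behaviour, using made-up prompt strings:

from promptolution.optimizers.base_optimizer import DummyOptimizer

optimizer = DummyOptimizer(initial_prompts=["Classify the sentiment.", "Label the tweet."])
result = optimizer.optimize(n_steps=5)
assert result == ["Classify the sentiment.", "Label the tweet."]  # prompts are returned unchanged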

evoprompt_de

Module for EvoPromptDE optimizer.

EvoPromptDE

Bases: BaseOptimizer

EvoPromptDE: Differential Evolution-based Prompt Optimizer.

This class implements a differential evolution algorithm for optimizing prompts in large language models. It is adapted from the paper "Connecting Large Language Models with Evolutionary Algorithms Yields Powerful Prompt Optimizers" by Guo et al., 2023.

The optimizer uses a differential evolution strategy to generate new prompts from existing ones, with an option to use the current best prompt as a donor.

Attributes:

- prompt_template (str): Template for generating meta-prompts during evolution.
- donor_random (bool): If False, uses the current best prompt as a donor; if True, uses a random prompt.
- meta_llm: Language model used for generating child prompts from meta-prompts.

Parameters:

- prompt_template (str): Template for meta-prompts. Defaults to None.
- meta_llm (BaseLLM): Language model for child prompt generation. Defaults to None.
- donor_random (bool): Whether to use a random donor. Defaults to False.
- n_eval_samples (int): Number of samples drawn from the task for each prompt evaluation. Defaults to 20.
- **args: Additional arguments passed to the BaseOptimizer.
Source code in promptolution/optimizers/evoprompt_de.py
class EvoPromptDE(BaseOptimizer):
    """EvoPromptDE: Differential Evolution-based Prompt Optimizer.

    This class implements a differential evolution algorithm for optimizing prompts in large language models.
    It is adapted from the paper "Connecting Large Language Models with Evolutionary Algorithms
    Yields Powerful Prompt Optimizers" by Guo et al., 2023.

    The optimizer uses a differential evolution strategy to generate new prompts from existing ones,
    with an option to use the current best prompt as a donor.

    Attributes:
        prompt_template (str): Template for generating meta-prompts during evolution.
        donor_random (bool): If False, uses the current best prompt as a donor; if True, uses a random prompt.
        meta_llm: Language model used for generating child prompts from meta-prompts.

    Args:
        prompt_template (str): Template for meta-prompts.
        meta_llm: Language model for child prompt generation.
        donor_random (bool, optional): Whether to use a random donor. Defaults to False.
        n_eval_samples (int, optional): Number of samples drawn from the task for each prompt evaluation. Defaults to 20.
        **args: Additional arguments passed to the BaseOptimizer.
    """

    def __init__(
        self,
        prompt_template: str = None,
        meta_llm: BaseLLM = None,
        donor_random: bool = False,
        n_eval_samples: int = 20,
        **args
    ):
        """Initialize the EvoPromptDE optimizer."""
        self.prompt_template = prompt_template
        self.n_eval_samples = n_eval_samples
        self.donor_random = donor_random
        assert meta_llm is not None, "A meta language model must be provided."
        self.meta_llm = meta_llm
        super().__init__(**args)

    def optimize(self, n_steps: int) -> List[str]:
        """Perform the optimization process for a specified number of steps.

        This method iteratively improves the prompts using a differential evolution strategy.
        It evaluates prompts, generates new prompts using the DE algorithm, and replaces
        prompts if the new ones perform better.

        Args:
            n_steps (int): Number of optimization steps to perform.

        Returns:
            List[str]: The optimized list of prompts after all steps.
        """
        self.scores = self.task.evaluate(self.prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples)
        self.prompts = [prompt for _, prompt in sorted(zip(self.scores, self.prompts), reverse=True)]
        self.scores = sorted(self.scores, reverse=True)

        for _ in range(n_steps):
            cur_best = self.prompts[0]
            meta_prompts = []
            for i in range(len(self.prompts)):
                # create meta prompts
                old_prompt = self.prompts[i]

                candidates = [prompt for prompt in self.prompts if prompt != old_prompt]
                a, b, c = np.random.choice(candidates, size=3, replace=False)

                if not self.donor_random:
                    c = cur_best

                meta_prompt = (
                    self.prompt_template.replace("<prompt0>", old_prompt)
                    .replace("<prompt1>", a)
                    .replace("<prompt2>", b)
                    .replace("<prompt3>", c)
                )

                meta_prompts.append(meta_prompt)

            child_prompts = self.meta_llm.get_response(meta_prompts)
            child_prompts = [prompt.split("<prompt>")[-1].split("</prompt>")[0].strip() for prompt in child_prompts]

            child_scores = self.task.evaluate(
                child_prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples
            )

            for i in range(len(self.prompts)):
                if child_scores[i] > self.scores[i]:
                    self.prompts[i] = child_prompts[i]
                    self.scores[i] = child_scores[i]

            continue_optimization = self._on_step_end()

            if not continue_optimization:
                break

        self._on_train_end()

        return self.prompts
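
For illustration, a sketch of constructing EvoPromptDE with a hand-written meta-prompt template. The <prompt0> to <prompt3> placeholders and the <prompt></prompt> answer tags match what the source above substitutes and parses; meta_llm, task, predictor, and initial_prompts are assumed, hypothetical objects, and the template text is made up (the packaged EVOPROMPT_DE_TEMPLATE plays the same role in real use).

from promptolution.optimizers.evoprompt_de import EvoPromptDE

# Made-up DE meta-prompt; the default EVOPROMPT_DE_TEMPLATE serves the same purpose.
DE_TEMPLATE = (
    "Improve the base instruction using the differences between the other instructions.\n"
    "Base: <prompt0>\nInstruction A: <prompt1>\nInstruction B: <prompt2>\nDonor: <prompt3>\n"
    "Return the new instruction wrapped in <prompt></prompt> tags."
)

optimizer = EvoPromptDE(
    prompt_template=DE_TEMPLATE,
    meta_llm=meta_llm,                # assumed BaseLLM implementation
    donor_random=False,               # use the current best prompt as the donor
    n_eval_samples=20,
    initial_prompts=initial_prompts,  # at least four prompts, so three distinct partners can be drawn
    task=task,                        # assumed BaseTask
    predictor=predictor,              # assumed BasePredictor
)
prompts = optimizer.optimize(n_steps=5)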

__init__(prompt_template=None, meta_llm=None, donor_random=False, n_eval_samples=20, **args)

Initialize the EvoPromptDE optimizer.

Source code in promptolution/optimizers/evoprompt_de.py
def __init__(
    self,
    prompt_template: str = None,
    meta_llm: BaseLLM = None,
    donor_random: bool = False,
    n_eval_samples: int = 20,
    **args
):
    """Initialize the EvoPromptDE optimizer."""
    self.prompt_template = prompt_template
    self.n_eval_samples = n_eval_samples
    self.donor_random = donor_random
    assert meta_llm is not None, "A meta language model must be provided."
    self.meta_llm = meta_llm
    super().__init__(**args)

optimize(n_steps)

Perform the optimization process for a specified number of steps.

This method iteratively improves the prompts using a differential evolution strategy. It evaluates prompts, generates new prompts using the DE algorithm, and replaces prompts if the new ones perform better.

Parameters:

- n_steps (int): Number of optimization steps to perform. Required.

Returns:

- List[str]: The optimized list of prompts after all steps.

Source code in promptolution/optimizers/evoprompt_de.py
def optimize(self, n_steps: int) -> List[str]:
    """Perform the optimization process for a specified number of steps.

    This method iteratively improves the prompts using a differential evolution strategy.
    It evaluates prompts, generates new prompts using the DE algorithm, and replaces
    prompts if the new ones perform better.

    Args:
        n_steps (int): Number of optimization steps to perform.

    Returns:
        List[str]: The optimized list of prompts after all steps.
    """
    self.scores = self.task.evaluate(self.prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples)
    self.prompts = [prompt for _, prompt in sorted(zip(self.scores, self.prompts), reverse=True)]
    self.scores = sorted(self.scores, reverse=True)

    for _ in range(n_steps):
        cur_best = self.prompts[0]
        meta_prompts = []
        for i in range(len(self.prompts)):
            # create meta prompts
            old_prompt = self.prompts[i]

            candidates = [prompt for prompt in self.prompts if prompt != old_prompt]
            a, b, c = np.random.choice(candidates, size=3, replace=False)

            if not self.donor_random:
                c = cur_best

            meta_prompt = (
                self.prompt_template.replace("<prompt0>", old_prompt)
                .replace("<prompt1>", a)
                .replace("<prompt2>", b)
                .replace("<prompt3>", c)
            )

            meta_prompts.append(meta_prompt)

        child_prompts = self.meta_llm.get_response(meta_prompts)
        child_prompts = [prompt.split("<prompt>")[-1].split("</prompt>")[0].strip() for prompt in child_prompts]

        child_scores = self.task.evaluate(
            child_prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples
        )

        for i in range(len(self.prompts)):
            if child_scores[i] > self.scores[i]:
                self.prompts[i] = child_prompts[i]
                self.scores[i] = child_scores[i]

        continue_optimization = self._on_step_end()

        if not continue_optimization:
            break

    self._on_train_end()

    return self.prompts

evoprompt_ga

Module for EvoPromptGA optimizer.

EvoPromptGA

Bases: BaseOptimizer

EvoPromptGA: Genetic Algorithm-based Prompt Optimizer.

This class implements a genetic algorithm for optimizing prompts in large language models. It is adapted from the paper "Connecting Large Language Models with Evolutionary Algorithms Yields Powerful Prompt Optimizers" by Guo et al., 2023.

The optimizer uses crossover operations to generate new prompts from existing ones, with different selection methods available for choosing parent prompts.

Attributes:

- prompt_template (str): Template for generating meta-prompts during crossover.
- meta_llm: Language model used for generating child prompts from meta-prompts.
- selection_mode (str): Method for selecting parent prompts ('random', 'wheel', or 'tour').

Parameters:

- prompt_template (str): Template for meta-prompts. Defaults to None.
- meta_llm (BaseLLM): Language model for child prompt generation. Defaults to None.
- selection_mode (str): Parent selection method. Defaults to "wheel".
- n_eval_samples (int): Number of samples drawn from the task for each prompt evaluation. Defaults to 20.
- **args: Additional arguments passed to the BaseOptimizer.

Raises:

- AssertionError: If an invalid selection mode is provided.

Source code in promptolution/optimizers/evoprompt_ga.py
class EvoPromptGA(BaseOptimizer):
    """EvoPromptGA: Genetic Algorithm-based Prompt Optimizer.

    This class implements a genetic algorithm for optimizing prompts in large language models.
    It is adapted from the paper "Connecting Large Language Models with Evolutionary Algorithms
    Yields Powerful Prompt Optimizers" by Guo et al., 2023.

    The optimizer uses crossover operations to generate new prompts from existing ones,
    with different selection methods available for choosing parent prompts.

    Attributes:
        prompt_template (str): Template for generating meta-prompts during crossover.
        meta_llm: Language model used for generating child prompts from meta-prompts.
        selection_mode (str): Method for selecting parent prompts ('random', 'wheel', or 'tour').

    Args:
        prompt_template (str): Template for meta-prompts.
        meta_llm: Language model for child prompt generation.
        selection_mode (str, optional): Parent selection method. Defaults to "wheel".
        n_eval_samples (int, optional): Number of samples drawn from the task for each prompt evaluation. Defaults to 20.
        **args: Additional arguments passed to the BaseOptimizer.

    Raises:
        AssertionError: If an invalid selection mode is provided.
    """

    def __init__(
        self,
        prompt_template: str = None,
        meta_llm: BaseLLM = None,
        selection_mode: str = "wheel",
        n_eval_samples: int = 20,
        **args,
    ):
        """Initialize the EvoPromptGA optimizer."""
        self.prompt_template = prompt_template
        self.n_eval_samples = n_eval_samples
        assert meta_llm is not None, "Meta_llm is required"
        self.meta_llm = meta_llm
        assert selection_mode in ["random", "wheel", "tour"], "Invalid selection mode."
        self.selection_mode = selection_mode
        super().__init__(**args)

    def optimize(self, n_steps: int) -> List[str]:
        """Perform the optimization process for a specified number of steps.

        This method iteratively improves the prompts using genetic algorithm techniques.
        It evaluates prompts, performs crossover to generate new prompts, and selects
        the best prompts for the next generation.

        Args:
            n_steps (int): Number of optimization steps to perform.

        Returns:
            List[str]: The optimized list of prompts after all steps.
        """
        # get scores from task
        if self.verbosity > 1:
            self.scores, seq = self.task.evaluate(
                self.prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples, return_seq=True
            )
            self.scores = self.scores.tolist()
            logger.warning(f"Initial scores: {self.scores}")
            logger.warning(f"Initial sequences: {seq}")
        else:
            self.scores = self.task.evaluate(
                self.prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples
            ).tolist()
        # sort prompts by score
        self.prompts = [prompt for _, prompt in sorted(zip(self.scores, self.prompts), reverse=True)]
        self.scores = sorted(self.scores, reverse=True)

        for _ in range(n_steps):
            new_prompts = self._crossover(self.prompts, self.scores)
            prompts = self.prompts + new_prompts

            if self.verbosity > 1:
                logger.warning(f"Prompts: {prompts}")

            # evaluate new prompts
            if self.verbosity > 1:
                new_scores, seq = self.task.evaluate(
                    new_prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples, return_seq=True
                )
                new_scores = new_scores.tolist()
                logger.warning(f"Scores: {new_scores}")
                logger.warning(f"Sequences: {seq}")

            else:
                new_scores = self.task.evaluate(
                    new_prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples
                ).tolist()

            scores = self.scores + new_scores

            # sort scores and prompts
            self.prompts = [prompt for _, prompt in sorted(zip(scores, prompts), reverse=True)][: len(self.prompts)]
            self.scores = sorted(scores, reverse=True)[: len(self.prompts)]

            continue_optimization = self._on_step_end()
            if not continue_optimization:
                break

        self._on_train_end()
        return self.prompts

    def _crossover(self, prompts, scores) -> List[str]:
        """Perform crossover operation to generate new child prompts.

        This method selects parent prompts based on the chosen selection mode,
        creates meta-prompts using the prompt template, and generates new child
        prompts using the meta language model.

        Args:
            prompts (List[str]): List of current prompts.
            scores (List[float]): Corresponding scores for the prompts.

        Returns:
            List[str]: Newly generated child prompts.
        """
        # parent selection
        if self.selection_mode == "wheel":
            wheel_idx = np.random.choice(
                np.arange(0, len(prompts)),
                size=len(prompts),
                replace=True,
                p=np.array(scores) / np.sum(scores) if np.sum(scores) > 0 else np.ones(len(scores)) / len(scores),
            ).tolist()
            parent_pop = [self.prompts[idx] for idx in wheel_idx]

        elif self.selection_mode in ["random", "tour"]:
            parent_pop = self.prompts

        # crossover
        meta_prompts = []
        for _ in self.prompts:
            if self.selection_mode in ["random", "wheel"]:
                parent_1, parent_2 = np.random.choice(parent_pop, size=2, replace=False)
            elif self.selection_mode == "tour":
                group_1 = np.random.choice(parent_pop, size=2, replace=False)
                group_2 = np.random.choice(parent_pop, size=2, replace=False)
                # use the best of each group based on scores
                parent_1 = group_1[np.argmax([self.scores[self.prompts.index(p)] for p in group_1])]
                parent_2 = group_2[np.argmax([self.scores[self.prompts.index(p)] for p in group_2])]

            meta_prompt = self.prompt_template.replace("<prompt1>", parent_1).replace("<prompt2>", parent_2)
            meta_prompts.append(meta_prompt)

        child_prompts = self.meta_llm.get_response(meta_prompts)
        if self.verbosity > 1:
            logger.warning("meta_prompts:")
            logger.warning(meta_prompts)
            logger.warning("child_prompts:")
            logger.warning(child_prompts)
        child_prompts = [prompt.split("<prompt>")[-1].split("</prompt>")[0].strip() for prompt in child_prompts]

        return child_prompts
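
A similar sketch for EvoPromptGA, showing a two-parent crossover template and the tournament selection mode. The <prompt1> and <prompt2> placeholders and the <prompt></prompt> answer tags match the source above; meta_llm, task, predictor, and initial_prompts are assumed, hypothetical objects.

from promptolution.optimizers.evoprompt_ga import EvoPromptGA

# Made-up crossover meta-prompt; the default EVOPROMPT_GA_TEMPLATE serves the same purpose.
GA_TEMPLATE = (
    "Combine the two instructions below into a single, better instruction.\n"
    "Instruction 1: <prompt1>\nInstruction 2: <prompt2>\n"
    "Return the result wrapped in <prompt></prompt> tags."
)

optimizer = EvoPromptGA(
    prompt_template=GA_TEMPLATE,
    meta_llm=meta_llm,                # assumed BaseLLM implementation
    selection_mode="tour",            # tournament selection; "wheel" and "random" are also supported
    n_eval_samples=20,
    initial_prompts=initial_prompts,  # assumed starting population
    task=task,                        # assumed BaseTask
    predictor=predictor,              # assumed BasePredictor
)
prompts = optimizer.optimize(n_steps=5)
print(prompts[0])  # the population is kept sorted, best prompt first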

__init__(prompt_template=None, meta_llm=None, selection_mode='wheel', n_eval_samples=20, **args)

Initialize the EvoPromptGA optimizer.

Source code in promptolution/optimizers/evoprompt_ga.py
def __init__(
    self,
    prompt_template: str = None,
    meta_llm: BaseLLM = None,
    selection_mode: str = "wheel",
    n_eval_samples: int = 20,
    **args,
):
    """Initialize the EvoPromptGA optimizer."""
    self.prompt_template = prompt_template
    self.n_eval_samples = n_eval_samples
    assert meta_llm is not None, "Meta_llm is required"
    self.meta_llm = meta_llm
    assert selection_mode in ["random", "wheel", "tour"], "Invalid selection mode."
    self.selection_mode = selection_mode
    super().__init__(**args)

optimize(n_steps)

Perform the optimization process for a specified number of steps.

This method iteratively improves the prompts using genetic algorithm techniques. It evaluates prompts, performs crossover to generate new prompts, and selects the best prompts for the next generation.

Parameters:

- n_steps (int): Number of optimization steps to perform. Required.

Returns:

- List[str]: The optimized list of prompts after all steps.

Source code in promptolution/optimizers/evoprompt_ga.py
def optimize(self, n_steps: int) -> List[str]:
    """Perform the optimization process for a specified number of steps.

    This method iteratively improves the prompts using genetic algorithm techniques.
    It evaluates prompts, performs crossover to generate new prompts, and selects
    the best prompts for the next generation.

    Args:
        n_steps (int): Number of optimization steps to perform.

    Returns:
        List[str]: The optimized list of prompts after all steps.
    """
    # get scores from task
    if self.verbosity > 1:
        self.scores, seq = self.task.evaluate(
            self.prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples, return_seq=True
        )
        self.scores = self.scores.tolist()
        logger.warning(f"Initial scores: {self.scores}")
        logger.warning(f"Initial sequences: {seq}")
    else:
        self.scores = self.task.evaluate(
            self.prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples
        ).tolist()
    # sort prompts by score
    self.prompts = [prompt for _, prompt in sorted(zip(self.scores, self.prompts), reverse=True)]
    self.scores = sorted(self.scores, reverse=True)

    for _ in range(n_steps):
        new_prompts = self._crossover(self.prompts, self.scores)
        prompts = self.prompts + new_prompts

        if self.verbosity > 1:
            logger.warning(f"Prompts: {prompts}")

        # evaluate new prompts
        if self.verbosity > 1:
            new_scores, seq = self.task.evaluate(
                new_prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples, return_seq=True
            )
            new_scores = new_scores.tolist()
            logger.warning(f"Scores: {new_scores}")
            logger.warning(f"Sequences: {seq}")

        else:
            new_scores = self.task.evaluate(
                new_prompts, self.predictor, subsample=True, n_samples=self.n_eval_samples
            ).tolist()

        scores = self.scores + new_scores

        # sort scores and prompts
        self.prompts = [prompt for _, prompt in sorted(zip(scores, prompts), reverse=True)][: len(self.prompts)]
        self.scores = sorted(scores, reverse=True)[: len(self.prompts)]

        continue_optimization = self._on_step_end()
        if not continue_optimization:
            break

    self._on_train_end()
    return self.prompts

opro

Module for OPRO.

Opro

Bases: BaseOptimizer

Opro: Optimization by PROmpting.

Proposed by the paper "Large Language Models as Optimizers" by Yang et al.: https://arxiv.org/abs/2309.03409. This optimizer works by providing the meta-LLM with a task description, sampled input/output examples, and previously evaluated prompts with their respective scores.

Attributes:

- meta_llm (BaseLLM): The meta-LLM used to generate new prompts.
- n_samples (int): The number of input/output examples from the task dataset shown to the meta-LLM.

Methods:

- _sample_examples: Sample examples from the task dataset.
- _format_old_instructions: Format the previous prompts and their scores.
- optimize: Generate, evaluate, and collect new prompts over a number of steps.

Source code in promptolution/optimizers/opro.py
class Opro(BaseOptimizer):
    """Opro: Optimization by PROmpting.

    Proposed by the paper "Large Language Models as Optimizers" by Yang et al.: https://arxiv.org/abs/2309.03409.
    This optimizer works by providing the meta-LLM with a task description, sampled input/output examples,
    and previously evaluated prompts with their respective scores.

    Attributes:
        meta_llm (BaseLLM): The meta-LLM used to generate new prompts.
        n_samples (int): The number of input/output examples from the task dataset shown to the meta-LLM.

    Methods:
        _sample_examples: Sample examples from the task dataset.
        _format_old_instructions: Format the previous prompts and their scores.
        optimize: Generate, evaluate, and collect new prompts over a number of steps.
    """

    def __init__(
        self, meta_llm: BaseLLM, n_samples: int = 2, prompt_template: str = None, n_eval_samples: int = 20, **args
    ):
        """Initialize the Opro optimizer."""
        self.meta_llm = meta_llm

        assert n_samples > 0, "n_samples must be greater than 0."
        self.n_samples = n_samples
        self.n_eval_samples = n_eval_samples

        self.meta_prompt = prompt_template if prompt_template else OPRO_TEMPLATE

        super().__init__(**args)

        self.scores = [
            self.task.evaluate(p, self.predictor, subsample=True, n_samples=self.n_eval_samples)[0]
            for p in self.prompts
        ]

    def _sample_examples(self):
        """Sample examples from the task dataset with their label.

        Returns:
            str: The formatted string of sampled examples.
        """
        idx = np.random.choice(len(self.task.xs), self.n_samples)
        sample_x = self.task.xs[idx]
        sample_y = self.task.ys[idx]

        return "\n".join([f"Input: {x}\nOutput: {y}" for x, y in zip(sample_x, sample_y)])

    def _format_old_instructions(self):
        """Format the previous prompts and their respective scores.

        Returns:
            str: The formatted string of previous prompts and their scores.
        """
        return "".join(
            [
                f"The old instruction was:\n{prompt}\nIt scored: {score}\n\n"
                for prompt, score in zip(self.prompts, self.scores)
            ]
        )

    def optimize(self, n_steps: int) -> List[str]:
        """Run the optimization loop, prompting the meta-LLM for a new prompt at each step.

        Args:
            n_steps (int): The number of optimization steps to perform.

        Returns:
            List[str]: All prompts collected during optimization, including the initial ones.
        """
        for _ in range(n_steps):
            meta_prompt = self.meta_prompt.replace("<old_instructions>", self._format_old_instructions()).replace(
                "<examples>", self._sample_examples()
            )

            prompt = self.meta_llm.get_response([meta_prompt])[0]
            prompt = prompt.split("<prompt>")[-1].split("</prompt>")[0].strip()
            score = self.task.evaluate(prompt, self.predictor, subsample=True, n_samples=self.n_eval_samples)

            self.prompts.append(prompt)
            self.scores.append(score)

            continue_optimization = self._on_step_end()
            if not continue_optimization:
                break

        self._on_epoch_end()

        return self.prompts
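
For illustration, a sketch of constructing Opro with a hand-written meta-prompt. The <old_instructions> and <examples> placeholders and the <prompt></prompt> answer tags match what the source above fills in and parses; meta_llm, task, predictor, and initial_prompts are assumed, hypothetical objects, and the template text is made up (the packaged OPRO_TEMPLATE plays the same role in real use).

from promptolution.optimizers.opro import Opro

# Made-up OPRO-style meta-prompt; the default OPRO_TEMPLATE serves the same purpose.
OPRO_STYLE_TEMPLATE = (
    "You are optimizing an instruction for a text classification task.\n"
    "<old_instructions>\n"
    "Here are some examples of the task:\n<examples>\n"
    "Write a new instruction that scores higher than all previous ones, "
    "wrapped in <prompt></prompt> tags."
)

optimizer = Opro(
    meta_llm=meta_llm,                # assumed BaseLLM implementation
    n_samples=2,                      # number of input/output examples shown per step
    prompt_template=OPRO_STYLE_TEMPLATE,
    initial_prompts=initial_prompts,  # assumed starting prompts
    task=task,                        # assumed BaseTask exposing .xs, .ys, and .evaluate
    predictor=predictor,              # assumed BasePredictor
)
prompts = optimizer.optimize(n_steps=10)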

__init__(meta_llm, n_samples=2, prompt_template=None, n_eval_samples=20, **args)

Initialize the Opro optimizer.

Source code in promptolution/optimizers/opro.py
def __init__(
    self, meta_llm: BaseLLM, n_samples: int = 2, prompt_template: str = None, n_eval_samples: int = 20, **args
):
    """Initialize the Opro optimizer."""
    self.meta_llm = meta_llm

    assert n_samples > 0, "n_samples must be greater than 0."
    self.n_samples = n_samples
    self.n_eval_samples = n_eval_samples

    self.meta_prompt = prompt_template if prompt_template else OPRO_TEMPLATE

    super().__init__(**args)

    self.scores = [
        self.task.evaluate(p, self.predictor, subsample=True, n_samples=self.n_eval_samples)[0]
        for p in self.prompts
    ]

optimize(n_steps)

Run the optimization loop, prompting the meta-LLM for a new prompt at each step.

Parameters:

- n_steps (int): The number of optimization steps to perform. Required.

Returns:

- List[str]: All prompts collected during optimization, including the initial ones.

Source code in promptolution/optimizers/opro.py
def optimize(self, n_steps: int) -> List[str]:
    """Run the optimization loop, prompting the meta-LLM for a new prompt at each step.

    Args:
        n_steps (int): The number of optimization steps to perform.

    Returns:
        List[str]: All prompts collected during optimization, including the initial ones.
    """
    for _ in range(n_steps):
        meta_prompt = self.meta_prompt.replace("<old_instructions>", self._format_old_instructions()).replace(
            "<examples>", self._sample_examples()
        )

        prompt = self.meta_llm.get_response([meta_prompt])[0]
        prompt = prompt.split("<prompt>")[-1].split("</prompt>")[0].strip()
        score = self.task.evaluate(prompt, self.predictor, subsample=True, n_samples=self.n_eval_samples)

        self.prompts.append(prompt)
        self.scores.append(score)

        continue_optimization = self._on_step_end()
        if not continue_optimization:
            break

    self._on_epoch_end()

    return self.prompts