mistralai_tracer_v1

ChatCompletionResponse

Bases: ChatCompletionResponse

Wrapper of mistralai.models.ChatCompletionResponse with ImpactsOutput

CompletionChunk

Bases: CompletionChunk

Wrapper of mistralai.models.CompletionChunk with ImpactsOutput
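
Both wrapper classes follow the same pattern: the upstream pydantic model is subclassed and extended with an impacts field. A minimal sketch of that pattern (the import path of ImpactsOutput and the field's optionality are assumptions, not copied from the source):

from typing import Optional

from mistralai.models import ChatCompletionResponse as _ChatCompletionResponse
from mistralai.models import CompletionChunk as _CompletionChunk

from ecologits.tracers.utils import ImpactsOutput  # assumed import path


class ChatCompletionResponse(_ChatCompletionResponse):
    # Extra field carrying the computed environmental impacts.
    impacts: Optional[ImpactsOutput] = None


class CompletionChunk(_CompletionChunk):
    impacts: Optional[ImpactsOutput] = None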

MistralAIInstrumentor()

Instrumentor initialized by EcoLogits to automatically wrap all MistralAI calls

Source code in ecologits/tracers/mistralai_tracer_v1.py
def __init__(self) -> None:
    self.wrapped_methods = [
        {
            "module": "mistralai.chat",
            "name": "Chat.complete",
            "wrapper": mistralai_chat_wrapper,
        },
        {
            "module": "mistralai.chat",
            "name": "Chat.complete_async",
            "wrapper": mistralai_async_chat_wrapper,
        },
        {
            "module": "mistralai.chat",
            "name": "Chat.stream",
            "wrapper": mistralai_chat_wrapper_stream,
        },
        {
            "module": "mistralai.chat",
            "name": "Chat.stream_async",
            "wrapper": mistralai_async_chat_wrapper_stream,
        },
    ]
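
The instrumentor only declares what to patch; the patching step itself is not shown on this page. By convention with wrapt-based instrumentors, it iterates over wrapped_methods and monkey-patches each target. A plausible sketch, assuming an instrument() method name:

from wrapt import wrap_function_wrapper


class MistralAIInstrumentor:
    # __init__ as above: populates self.wrapped_methods

    def instrument(self) -> None:
        # Replace each declared method so every call is routed through
        # the corresponding EcoLogits wrapper.
        for wrapped in self.wrapped_methods:
            wrap_function_wrapper(
                module=wrapped["module"],
                name=wrapped["name"],
                wrapper=wrapped["wrapper"],
            )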

mistralai_chat_wrapper(wrapped, instance, args, kwargs)

Function that wraps a MistralAI answer with computed impacts

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| wrapped | Callable | Callable that returns the LLM response | required |
| instance | Mistral | Never used - for compatibility with wrapt | required |
| args | Any | Arguments of the callable | required |
| kwargs | Any | Keyword arguments of the callable | required |

Returns:

| Type | Description |
| --- | --- |
| ChatCompletionResponse | A wrapped ChatCompletionResponse with impacts |

Source code in ecologits/tracers/mistralai_tracer_v1.py
def mistralai_chat_wrapper(
    wrapped: Callable, instance: Mistral, args: Any, kwargs: Any  # noqa: ARG001
) -> ChatCompletionResponse:
    """
    Function that wraps a MistralAI answer with computed impacts

    Args:
        wrapped: Callable that returns the LLM response
        instance: Never used - for compatibility with `wrapt`
        args: Arguments of the callable
        kwargs: Keyword arguments of the callable

    Returns:
        A wrapped `ChatCompletionResponse` with impacts
    """
    timer_start = time.perf_counter()
    response = wrapped(*args, **kwargs)
    request_latency = time.perf_counter() - timer_start
    impacts = llm_impacts(
        provider=PROVIDER,
        model_name=response.model,
        output_token_count=response.usage.completion_tokens,
        request_latency=request_latency,
        electricity_mix_zone=EcoLogits.config.electricity_mix_zone
    )
    if impacts is not None:
        return ChatCompletionResponse(**response.model_dump(), impacts=impacts)
    else:
        return response
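
Once EcoLogits is initialized, this wrapper is transparent: a regular mistralai call returns the enriched response. A short usage sketch (the model name is illustrative; the impact attributes follow the EcoLogits ImpactsOutput model):

import os

from ecologits import EcoLogits
from mistralai import Mistral

EcoLogits.init()  # installs the MistralAI instrumentor, among others

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Hello!"}],
)

# The wrapper falls back to the plain response when no impact estimate is
# available (e.g. unknown model), so guard before reading impacts.
if getattr(response, "impacts", None) is not None:
    print(response.impacts.energy)  # energy consumption estimate
    print(response.impacts.gwp)     # global warming potential estimate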

mistralai_chat_wrapper_stream(wrapped, instance, args, kwargs)

Function that wraps a MistralAI answer with computed impacts in streaming mode

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| wrapped | Callable | Callable that returns the LLM response | required |
| instance | Mistral | Never used - for compatibility with wrapt | required |
| args | Any | Arguments of the callable | required |
| kwargs | Any | Keyword arguments of the callable | required |

Returns:

| Type | Description |
| --- | --- |
| Iterable[CompletionEvent] | A wrapped Iterable[CompletionEvent] with impacts |

Source code in ecologits/tracers/mistralai_tracer_v1.py
def mistralai_chat_wrapper_stream(
    wrapped: Callable, instance: Mistral, args: Any, kwargs: Any  # noqa: ARG001
) -> Iterable[CompletionEvent]:
    """
    Function that wraps a MistralAI answer with computed impacts in streaming mode

    Args:
        wrapped: Callable that returns the LLM response
        instance: Never used - for compatibility with `wrapt`
        args: Arguments of the callable
        kwargs: Keyword arguments of the callable

    Returns:
        A wrapped `Iterable[CompletionEvent]` with impacts
    """
    timer_start = time.perf_counter()
    stream = wrapped(*args, **kwargs)
    token_count = 0
    for i, chunk in enumerate(stream):
        # Approximate the output token count: one token per streamed chunk,
        # skipping the first chunk and the final chunk with a finish_reason.
        if i > 0 and chunk.data.choices[0].finish_reason is None:
            token_count += 1
        request_latency = time.perf_counter() - timer_start
        model_name = chunk.data.model
        impacts = llm_impacts(
            provider=PROVIDER,
            model_name=model_name,
            output_token_count=token_count,
            request_latency=request_latency,
            electricity_mix_zone=EcoLogits.config.electricity_mix_zone
        )
        if impacts is not None:
            chunk.data = CompletionChunk(**chunk.data.model_dump(), impacts=impacts)
        yield chunk
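
In streaming mode the impacts are recomputed on every chunk, so the last chunk reflects the whole request. A usage sketch under the same assumptions as the example above:

last_event = None
for event in client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Hello!"}],
):
    last_event = event  # event.data is the (possibly wrapped) CompletionChunk

# Read the cumulative impacts from the final chunk.
if last_event is not None and getattr(last_event.data, "impacts", None) is not None:
    print(last_event.data.impacts.energy)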

mistralai_async_chat_wrapper(wrapped, instance, args, kwargs) async

Function that wraps a MistralAI answer with computed impacts in async mode

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| wrapped | Callable | Async callable that returns the LLM response | required |
| instance | Mistral | Never used - for compatibility with wrapt | required |
| args | Any | Arguments of the callable | required |
| kwargs | Any | Keyword arguments of the callable | required |

Returns:

| Type | Description |
| --- | --- |
| ChatCompletionResponse | A wrapped ChatCompletionResponse with impacts |

Source code in ecologits/tracers/mistralai_tracer_v1.py
async def mistralai_async_chat_wrapper(
    wrapped: Callable,
    instance: Mistral,  # noqa: ARG001
    args: Any,
    kwargs: Any,
) -> ChatCompletionResponse:
    """
    Function that wraps a MistralAI answer with computed impacts in async mode

    Args:
        wrapped: Async callable that returns the LLM response
        instance: Never used - for compatibility with `wrapt`
        args: Arguments of the callable
        kwargs: Keyword arguments of the callable

    Returns:
        A wrapped `ChatCompletionResponse` with impacts
    """
    timer_start = time.perf_counter()
    response = await wrapped(*args, **kwargs)
    request_latency = time.perf_counter() - timer_start
    impacts = llm_impacts(
        provider=PROVIDER,
        model_name=response.model,
        output_token_count=response.usage.completion_tokens,
        request_latency=request_latency,
        electricity_mix_zone=EcoLogits.config.electricity_mix_zone
    )
    if impacts is not None:
        return ChatCompletionResponse(**response.model_dump(), impacts=impacts)
    else:
        return response
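
The async variant behaves the same, awaited instead of called. A usage sketch (same assumptions as the synchronous example):

import asyncio
import os

from ecologits import EcoLogits
from mistralai import Mistral

EcoLogits.init()


async def main() -> None:
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    response = await client.chat.complete_async(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    if getattr(response, "impacts", None) is not None:
        print(response.impacts.gwp)


asyncio.run(main())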

mistralai_async_chat_wrapper_stream(wrapped, instance, args, kwargs) async

Function that wraps a MistralAI answer with computed impacts in streaming and async mode

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| wrapped | Callable | Callable that returns the LLM response | required |
| instance | Mistral | Never used - for compatibility with wrapt | required |
| args | Any | Arguments of the callable | required |
| kwargs | Any | Keyword arguments of the callable | required |

Returns:

| Type | Description |
| --- | --- |
| AsyncGenerator[CompletionEvent, None] | A wrapped AsyncGenerator[CompletionEvent, None] with impacts |

Source code in ecologits/tracers/mistralai_tracer_v1.py
async def mistralai_async_chat_wrapper_stream(
    wrapped: Callable,
    instance: Mistral,  # noqa: ARG001
    args: Any,
    kwargs: Any,
) -> AsyncGenerator[CompletionEvent, None]:
    """
    Function that wraps a MistralAI answer with computed impacts in streaming and async mode

    Args:
        wrapped: Callable that returns the LLM response
        instance: Never used - for compatibility with `wrapt`
        args: Arguments of the callable
        kwargs: Keyword arguments of the callable

    Returns:
        A wrapped `AsyncGenerator[CompletionEvent, None]` with impacts
    """
    timer_start = time.perf_counter()
    stream = await wrapped(*args, **kwargs)
    return _generator(stream, timer_start)
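
The _generator helper is not reproduced on this page. Given the declared return type and the synchronous streaming wrapper above, it plausibly applies the same per-chunk logic as an async generator; the following is a hedged reconstruction, not the library's actual code:

async def _generator(
    stream: AsyncGenerator[CompletionEvent, None], timer_start: float
) -> AsyncGenerator[CompletionEvent, None]:
    token_count = 0
    i = 0
    async for chunk in stream:
        # Same token-counting heuristic as the sync streaming wrapper.
        if i > 0 and chunk.data.choices[0].finish_reason is None:
            token_count += 1
        i += 1
        request_latency = time.perf_counter() - timer_start
        impacts = llm_impacts(
            provider=PROVIDER,
            model_name=chunk.data.model,
            output_token_count=token_count,
            request_latency=request_latency,
            electricity_mix_zone=EcoLogits.config.electricity_mix_zone,
        )
        if impacts is not None:
            chunk.data = CompletionChunk(**chunk.data.model_dump(), impacts=impacts)
        yield chunk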