Skip to content

Latest commit

 

History

History
198 lines (134 loc) · 12.2 KB

File metadata and controls

198 lines (134 loc) · 12.2 KB

Classifiers

Overview

Classifiers API.

Available Operations

moderate

Moderations

Example Usage

from mistralai import Mistral
import os


with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:

    res = mistral.classifiers.moderate(model="mistral-moderation-latest", inputs="<value>")

    # Handle response
    print(res)

Parameters

Parameter Type Required Description Example
model str ✔️ ID of the model to use. mistral-moderation-latest
inputs models.ClassificationRequestInputs ✔️ Text to classify.
metadata Dict[str, Any] N/A
retries Optional[utils.RetryConfig] Configuration to override the default retry behavior of the client.

Response

models.ModerationResponse

Errors

Error Type Status Code Content Type
errors.HTTPValidationError 422 application/json
errors.SDKError 4XX, 5XX */*

moderate_chat

Chat Moderations

Example Usage

from mistralai import Mistral
import os


with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:

    res = mistral.classifiers.moderate_chat(inputs=[
        {
            "role": "tool",
            "content": "<value>",
        },
    ], model="LeBaron")

    # Handle response
    print(res)

Parameters

Parameter Type Required Description
inputs models.ChatModerationRequestInputs3 ✔️ Chat to classify.
model str ✔️ N/A
retries Optional[utils.RetryConfig] Configuration to override the default retry behavior of the client.

Response

models.ModerationResponse

Errors

Error Type Status Code Content Type
errors.HTTPValidationError 422 application/json
errors.SDKError 4XX, 5XX */*

classify

Classifications

Example Usage

from mistralai import Mistral
import os


with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:

    res = mistral.classifiers.classify(model="mistral-moderation-latest", inputs=[
        "<value 1>",
    ])

    # Handle response
    print(res)

Parameters

Parameter Type Required Description Example
model str ✔️ ID of the model to use. mistral-moderation-latest
inputs models.ClassificationRequestInputs ✔️ Text to classify.
metadata Dict[str, Any] N/A
retries Optional[utils.RetryConfig] Configuration to override the default retry behavior of the client.

Response

models.ClassificationResponse

Errors

Error Type Status Code Content Type
errors.HTTPValidationError 422 application/json
errors.SDKError 4XX, 5XX */*

classify_chat

Chat Classifications

Example Usage

from mistralai import Mistral
import os


with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:

    res = mistral.classifiers.classify_chat(model="Camry", input=[
        {
            "messages": [
                {
                    "role": "system",
                    "content": "<value>",
                },
            ],
        },
    ])

    # Handle response
    print(res)

Parameters

Parameter Type Required Description
model str ✔️ N/A
input models.Inputs ✔️ Chat to classify.
retries Optional[utils.RetryConfig] Configuration to override the default retry behavior of the client.

Response

models.ClassificationResponse

Errors

Error Type Status Code Content Type
errors.HTTPValidationError 422 application/json
errors.SDKError 4XX, 5XX */*