跳转至

Llm

OpenAI

OpenAI(
    model: str,
    base_url: Optional[str] = "https://api.openai.com/v1",
    api_key: Optional[str] = None,
    key_name: Optional[str] = None,
    client: Optional[OpenAI] = None,
    verbose: Optional[bool] = False,
    logger: Optional[Union[Logger, Callable]] = None,
    **kwargs: Any
)

Initialize a new instance of OpenAI client.

Parameters:

Name Type Description Default
model str

The model to use for completion.

required
base_url Optional[str]

The base URL of the API endpoint.

'https://api.openai.com/v1'
api_key Optional[str]

The API key used for authentication.

None
key_name Optional[str]

The name of the API key used for authentication. If not provided, the first API key in the environment variables will be used.

None
client Optional[OpenAI]

The HTTP client instance used to make requests to the API. This could be an instance of a library like requests or a custom client implementation.

None
verbose Optional[bool]

A boolean flag indicating whether to enable verbose output. When set to True, additional debugging information or logs will be displayed.

False
logger Optional[Union[Logger, Callable]]

A logger instance used for logging messages.

None
**kwargs Any

Additional keyword arguments.

{}

Examples:

from lumix.llm import OpenAI

base_url = "https://open.bigmodel.cn/api/paas/v4"
llm = OpenAI(model="glm-4-flash", base_url=base_url, api_key="your_api_key")

Source code in lumix\llm\completion\openai.py
def __init__(
        self,
        model: str,
        base_url: Optional[str] = "https://api.openai.com/v1",
        api_key: Optional[str] = None,
        key_name: Optional[str] = None,
        client: Optional[OpenAIOriginal] = None,
        verbose: Optional[bool] = False,
        logger: Optional[Union[Logger, Callable]] = None,
        **kwargs: Any,
):
    """Initialize a new instance of the OpenAI client wrapper.

    Args:
        model:
            The model to use for completion.
        base_url:
            The base URL of the API endpoint.
        api_key:
            The API key used for authentication.
        key_name:
            The name of the API key used for authentication. If not provided,
            the first API key in the environment variables will be used.
        client:
            The HTTP client instance used to make requests to the API. This
            could be an instance of a library like `requests` or a custom
            client implementation.
        verbose:
            A boolean flag indicating whether to enable verbose output. When
            set to True, additional debugging information or logs will be
            displayed.
        logger:
            A logger instance used for logging messages.
        **kwargs:
            Additional keyword arguments.
    Examples:
        ```python
        from lumix.llm import OpenAI

        base_url = "https://open.bigmodel.cn/api/paas/v4"
        llm = OpenAI(model="glm-4-flash", base_url=base_url, api_key="your_api_key")
        ```
    """
    # Record connection/configuration state first ...
    self.model, self.base_url = model, base_url
    self.api_key, self.key_name = api_key, key_name
    # ... then build or adopt the underlying SDK client (kept in this exact
    # position: set_client must not rely on logger/verbose being set yet).
    self.set_client(client)
    self.logger, self.verbose = logger, verbose
    self.kwargs = kwargs

completion

completion(
    prompt: Optional[str] = None,
    messages: Optional[
        Union[List[TypeMessage], List[Dict]]
    ] = None,
    stream: Optional[bool] = False,
    tools: List[Dict] = None,
    **kwargs
) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]

Call OpenAI API to get a completion.

Parameters:

Name Type Description Default
prompt Optional[str]

The prompt to generate a completion.

None
messages Optional[Union[List[TypeMessage], List[Dict]]]

The messages to generate a completion.

None
stream Optional[bool]

Whether to stream the response or not.

False
tools List[Dict]

The tools to generate a completion.

None
**kwargs
{}

Returns:

Type Description
Union[ChatCompletion, Stream[ChatCompletionChunk]]

Union[ChatCompletion, Stream[ChatCompletionChunk]]

Examples:

completion = self.llm.completion(prompt="你好")
print(completion.choices[0].message.content)
Source code in lumix\llm\completion\openai.py
def completion(
        self,
        prompt: Optional[str] = None,
        messages: Optional[Union[List[TypeMessage], List[Dict]]] = None,
        stream: Optional[bool] = False,
        tools: Optional[List[Dict]] = None,
        **kwargs,
) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
    """ Call OpenAI API to get a completion.

    Args:
        prompt: The prompt to generate a completion. Takes precedence over
            ``messages`` when both are given.
        messages: The messages to generate a completion, as ``Message``
            objects or plain dicts.
        stream: Whether to stream the response or not.
        tools: The tools to generate a completion.
        **kwargs: Additional arguments forwarded to
            ``client.chat.completions.create``.

    Returns:
        Union[ChatCompletion, Stream[ChatCompletionChunk]]

    Raises:
        ValueError: If neither ``prompt`` nor ``messages`` is provided.

    Examples:
        ```python
        completion = self.llm.completion(prompt="你好")
        print(completion.choices[0].message.content)
        ```
    """
    if prompt is not None:
        messages = [Message(role="user", content=prompt)]

    # Fail fast with a clear message instead of an opaque TypeError on
    # `messages[0]` below when both arguments are omitted.
    if not messages:
        raise ValueError("Either `prompt` or `messages` must be provided.")

    # Normalize Message objects into the plain-dict form the SDK expects.
    if not isinstance(messages[0], dict):
        messages = [msg.to_dict() for msg in messages]

    # Inner quotes must differ from the enclosing f-string quotes: the
    # original nested double quotes are a SyntaxError on Python < 3.12.
    self._logger(msg=f"[User] {messages[-1].get('content')}\n", color="blue")
    completion = self.client.chat.completions.create(
        model=self.model, messages=messages, tools=tools, stream=stream, **kwargs)
    if stream:
        return self.sse(completion)
    else:
        return self.sync(completion)

sse

sse(
    completion: Stream[ChatCompletionChunk],
) -> Stream[ChatCompletionChunk]
Source code in lumix\llm\completion\openai.py
def sse(self, completion: Stream[ChatCompletionChunk]) -> Stream[ChatCompletionChunk]:
    """Re-yield streamed chunks unchanged while accumulating the assistant text.

    Once the stream is exhausted, the concatenated assistant message is logged.
    """
    pieces = []
    for part in completion:
        delta_text = part.choices[0].delta.content
        if delta_text is not None:
            pieces.append(delta_text)
        yield part
    self._logger(msg=f"[Assistant] {''.join(pieces)}\n", color="green")

sync

sync(completion: ChatCompletion) -> ChatCompletion
Source code in lumix\llm\completion\openai.py
def sync(self, completion: ChatCompletion) -> ChatCompletion:
    """Log the assistant reply of a non-streaming completion and return it unchanged."""
    reply = completion.choices[0].message.content
    self._logger(msg=f"[Assistant] {reply}\n", color="green")
    return completion

structured_schema

structured_schema(schema: ModelMetaclass) -> List[Dict]
Source code in lumix\llm\completion\openai.py
def structured_schema(self, schema: ModelMetaclass,) -> List[Dict]:
    """Convert a pydantic model class into an OpenAI function-calling tool list."""
    spec = schema.model_json_schema()
    # One tool entry whose parameters mirror the pydantic JSON schema.
    function_spec = {
        'name': spec.get("title"),
        'description': spec.get("description"),
        "parameters": {
            "type": "object",
            'properties': spec.get("properties"),
            'required': spec.get("required"),
        },
    }
    return [{'type': 'function', 'function': function_spec}]

parse_dict

parse_dict(arguments: str) -> Dict
Source code in lumix\llm\completion\openai.py
def parse_dict(self, arguments: str) -> Dict:
    """Parse a JSON string (e.g. tool-call arguments) into a dict.

    Args:
        arguments: The raw JSON string returned by the model.

    Returns:
        The decoded dictionary.

    Raises:
        ValueError: If ``arguments`` is not valid JSON (or not a string).
    """
    try:
        return json.loads(arguments)
    except (json.JSONDecodeError, TypeError) as e:
        # Narrowed from the original bare `except Exception`, which masked
        # unrelated bugs; chain the cause so the root error stays visible.
        raise ValueError(f"Invalid JSON: {e}") from e

structured_output

structured_output(
    schema: ModelMetaclass,
    prompt: Optional[str] = None,
    messages: Optional[
        Union[List[TypeMessage], List[Dict]]
    ] = None,
    **kwargs
) -> Dict

结构化输出

Parameters:

Name Type Description Default
schema ModelMetaclass

输出结构Scheme

required
prompt Optional[str]

prompt

None
messages Optional[Union[List[TypeMessage], List[Dict]]]

messages

None
**kwargs
{}

Returns:

Type Description
Dict

结构化数据

Examples:

class Joke(BaseModel):
    '''Joke to tell user.'''
    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: int = Field(description="How funny the joke is, from 1 to 10")

data = self.llm.structured_output(schema=Joke, prompt="给我讲个简单的笑话")
pprint(data)
Source code in lumix\llm\completion\openai.py
def structured_output(
        self,
        schema: ModelMetaclass,
        prompt: Optional[str] = None,
        messages: Optional[Union[List[TypeMessage], List[Dict]]] = None,
        **kwargs
) -> Dict:
    """Structured output: coerce the model's reply into a dict matching `schema`.

    The schema is exposed to the model as a function-calling tool; the tool
    call's arguments are parsed as the structured result.

    Args:
        schema: A pydantic model class describing the desired output structure.
        prompt: Optional single user prompt.
        messages: Optional conversation history (Message objects or dicts).
        **kwargs: Additional arguments forwarded to `completion`.

    Returns:
        The structured data as a dict, or `{}` when the model made no tool call.

    Examples:
        ```python
        class Joke(BaseModel):
            '''Joke to tell user.'''
            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")
            rating: int = Field(description="How funny the joke is, from 1 to 10")

        data = self.llm.structured_output(schema=Joke, prompt="给我讲个简单的笑话")
        pprint(data)
        ```

    """
    # Build a one-element tool list from the pydantic schema.
    schema_tools = self.structured_schema(schema)
    completion = self.completion(
        prompt=prompt, messages=messages, stream=False, tools=schema_tools, **kwargs)
    if completion.choices[0].message.tool_calls is not None:
        # The model invoked the tool: its JSON arguments are the payload.
        return self.parse_dict(completion.choices[0].message.tool_calls[0].function.arguments)
    else:
        content = completion.choices[0].message.content
        # NOTE(review): `self.error` is not defined among this file's visible
        # methods — presumably supplied by a logging mixin; confirm it exists.
        self.error(msg=f"[{__class__.__name__}] No structured data found in the response: {content}")
        return {}