Skip to content

Commit 3fc5d47

Browse files
committed
feat: Introduce ManagedModel and ModelRunner
feat: Add ModelRunner ABC with invoke_model() and invoke_structured_model()
feat: Add ManagedModel replacing Chat; expose get_model_runner() escape hatch
feat!: Rename ChatResponse to ModelResponse in providers/types.py
feat!: Extract OpenAIModelRunner from OpenAIRunnerFactory; factory is now model-creation-only
feat!: Extract LangChainModelRunner from LangChainRunnerFactory; factory is now model-creation-only
feat: Add OpenAIHelper with shared utilities for model and future agent runners
feat: Add LangChainHelper with shared utilities for model and future agent runners
feat!: LangChainRunnerFactory is now a no-arg factory; static helpers moved to LangChainHelper
fix: LDClient.create_chat() is deprecated in favour of create_model()
fix: Chat alias in ldai.chat is deprecated in favour of ManagedModel
fix: Rename ai_provider param to model_runner in Judge and ManagedModel
1 parent 4bef65c commit 3fc5d47

21 files changed

Lines changed: 726 additions & 730 deletions
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,14 @@
11
"""LaunchDarkly AI SDK - LangChain Connector."""
22

3+
from ldai_langchain.langchain_helper import LangChainHelper
4+
from ldai_langchain.langchain_model_runner import LangChainModelRunner
35
from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory
46

57
__version__ = "0.1.0"
68

79
__all__ = [
810
'__version__',
911
'LangChainRunnerFactory',
12+
'LangChainHelper',
13+
'LangChainModelRunner',
1014
]
Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
"""Shared LangChain utilities for the LaunchDarkly AI SDK."""
2+
3+
from typing import Any, Dict, List, Optional, Union
4+
5+
from langchain_core.language_models.chat_models import BaseChatModel
6+
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
7+
from ldai import LDMessage
8+
from ldai.models import AIConfigKind
9+
from ldai.providers.types import LDAIMetrics
10+
from ldai.tracker import TokenUsage
11+
12+
13+
class LangChainHelper:
    """
    Shared utilities for LangChain-based runners (model, agent, agent graph).

    All methods are static — this class is a namespace, not meant to be instantiated.
    """

    @staticmethod
    def map_provider(ld_provider_name: str) -> str:
        """
        Map a LaunchDarkly provider name to its LangChain equivalent.

        :param ld_provider_name: LaunchDarkly provider name
        :return: LangChain-compatible provider name
        """
        normalized = ld_provider_name.lower()
        # Only providers whose LangChain name differs from LaunchDarkly's
        # need an entry here; everything else passes through lowercased.
        aliases: Dict[str, str] = {'gemini': 'google-genai'}
        return aliases.get(normalized, normalized)

    @staticmethod
    def convert_messages(
        messages: List[LDMessage],
    ) -> List[Union[HumanMessage, SystemMessage, AIMessage]]:
        """
        Convert LaunchDarkly messages to LangChain message objects.

        :param messages: List of LDMessage objects
        :return: List of LangChain message objects
        :raises ValueError: If an unsupported message role is encountered
        """
        # Dispatch table from LD role names to LangChain message classes.
        role_to_class = {
            'system': SystemMessage,
            'user': HumanMessage,
            'assistant': AIMessage,
        }
        converted: List[Union[HumanMessage, SystemMessage, AIMessage]] = []
        for msg in messages:
            message_class = role_to_class.get(msg.role)
            if message_class is None:
                raise ValueError(f'Unsupported message role: {msg.role}')
            converted.append(message_class(content=msg.content))
        return converted

    @staticmethod
    def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
        """
        Create a LangChain BaseChatModel from a LaunchDarkly AI configuration.

        :param ai_config: The LaunchDarkly AI configuration
        :return: A configured LangChain BaseChatModel
        """
        # Imported lazily so the langchain meta-package is only required
        # when a model is actually created.
        from langchain.chat_models import init_chat_model

        config = ai_config.to_dict()
        model_section = config.get('model') or {}
        provider_section = config.get('provider') or {}

        model_name = model_section.get('name', '')
        provider_name = provider_section.get('name', '')
        model_parameters = model_section.get('parameters') or {}

        return init_chat_model(
            model_name,
            model_provider=LangChainHelper.map_provider(provider_name),
            **model_parameters,
        )

    @staticmethod
    def get_ai_metrics_from_response(response: Any) -> LDAIMetrics:
        """
        Extract LaunchDarkly AI metrics from a LangChain response.

        :param response: The response from a LangChain model (BaseMessage or similar)
        :return: LDAIMetrics with success status and token usage
        """
        usage: Optional[TokenUsage] = None
        metadata = getattr(response, 'response_metadata', None)
        if metadata:
            # Providers disagree on key casing, so try both spellings of
            # each usage field before giving up on it.
            raw_usage = metadata.get('tokenUsage') or metadata.get('token_usage')
            if raw_usage:
                usage = TokenUsage(
                    total=raw_usage.get('totalTokens', 0) or raw_usage.get('total_tokens', 0),
                    input=raw_usage.get('promptTokens', 0) or raw_usage.get('prompt_tokens', 0),
                    output=raw_usage.get('completionTokens', 0) or raw_usage.get('completion_tokens', 0),
                )
        return LDAIMetrics(success=True, usage=usage)
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
"""LangChain model runner for LaunchDarkly AI SDK."""
2+
3+
from typing import Any, Dict, List
4+
5+
from langchain_core.language_models.chat_models import BaseChatModel
6+
from langchain_core.messages import BaseMessage
7+
from ldai import LDMessage, log
8+
from ldai.providers.model_runner import ModelRunner
9+
from ldai.providers.types import LDAIMetrics, ModelResponse, StructuredResponse
10+
from ldai.tracker import TokenUsage
11+
from ldai_langchain.langchain_helper import LangChainHelper
12+
13+
14+
class LangChainModelRunner(ModelRunner):
    """
    ModelRunner implementation for LangChain.

    Holds a fully-configured BaseChatModel.
    Returned by LangChainConnector.create_model(config).
    """

    def __init__(self, llm: BaseChatModel):
        # The fully-configured chat model this runner delegates to.
        self._llm = llm

    def get_llm(self) -> BaseChatModel:
        """
        Return the underlying LangChain BaseChatModel.

        :return: The BaseChatModel instance
        """
        return self._llm

    async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse:
        """
        Invoke the LangChain model with an array of messages.

        :param messages: Array of LDMessage objects representing the conversation
        :return: ModelResponse containing the model's response and metrics
        """
        try:
            lc_messages = LangChainHelper.convert_messages(messages)
            result: BaseMessage = await self._llm.ainvoke(lc_messages)
            metrics = LangChainHelper.get_ai_metrics_from_response(result)

            payload = result.content
            if isinstance(payload, str):
                text = payload
            else:
                # Only plain-text replies are supported; flag anything else
                # as a failed invocation while preserving the token usage.
                log.warning(
                    f'Multimodal response not supported, expecting a string. '
                    f'Content type: {type(payload)}, Content: {payload}'
                )
                metrics = LDAIMetrics(success=False, usage=metrics.usage)
                text = ''

            return ModelResponse(
                message=LDMessage(role='assistant', content=text),
                metrics=metrics,
            )
        except Exception as error:
            # Best-effort contract: report failure through metrics rather
            # than raising to the caller.
            log.warning(f'LangChain model invocation failed: {error}')
            return ModelResponse(
                message=LDMessage(role='assistant', content=''),
                metrics=LDAIMetrics(success=False, usage=None),
            )

    async def invoke_structured_model(
        self,
        messages: List[LDMessage],
        response_structure: Dict[str, Any],
    ) -> StructuredResponse:
        """
        Invoke the LangChain model with structured output support.

        :param messages: Array of LDMessage objects representing the conversation
        :param response_structure: Dictionary defining the output structure
        :return: StructuredResponse containing the structured data
        """
        def _error_response() -> StructuredResponse:
            # Shared shape for every failure path below.
            return StructuredResponse(
                data={},
                raw_response='',
                metrics=LDAIMetrics(success=False, usage=TokenUsage(total=0, input=0, output=0)),
            )

        try:
            lc_messages = LangChainHelper.convert_messages(messages)
            structured_llm = self._llm.with_structured_output(response_structure)
            result = await structured_llm.ainvoke(lc_messages)

            if not isinstance(result, dict):
                log.warning(f'Structured output did not return a dict. Got: {type(result)}')
                return _error_response()

            # NOTE(review): token usage is not surfaced by the structured
            # path; the zeroed TokenUsage is a placeholder.
            return StructuredResponse(
                data=result,
                raw_response=str(result),
                metrics=LDAIMetrics(success=True, usage=TokenUsage(total=0, input=0, output=0)),
            )
        except Exception as error:
            # Best-effort contract, mirroring invoke_model().
            log.warning(f'LangChain structured model invocation failed: {error}')
            return _error_response()
103+

0 commit comments

Comments
 (0)