Current Path: opt > hc_python > lib > python3.12 > site-packages > sentry_sdk > integrations
Name | Type | Size | Last Modified
---|---|---|---
__pycache__ | Directory | - | -
celery | Directory | - | -
django | Directory | - | -
grpc | Directory | - | -
opentelemetry | Directory | - | -
redis | Directory | - | -
spark | Directory | - | -
__init__.py | File | 10218 bytes | May 23 2025 10:34:44
_asgi_common.py | File | 3187 bytes | May 23 2025 10:34:44
_wsgi_common.py | File | 7558 bytes | May 23 2025 10:34:44
aiohttp.py | File | 12895 bytes | May 23 2025 10:34:44
anthropic.py | File | 9426 bytes | May 23 2025 10:34:44
argv.py | File | 911 bytes | May 23 2025 10:34:44
ariadne.py | File | 5834 bytes | May 23 2025 10:34:44
arq.py | File | 7857 bytes | May 23 2025 10:34:44
asgi.py | File | 12779 bytes | May 23 2025 10:34:44
asyncio.py | File | 4034 bytes | May 23 2025 10:34:44
asyncpg.py | File | 6521 bytes | May 23 2025 10:34:44
atexit.py | File | 1652 bytes | May 23 2025 10:34:44
aws_lambda.py | File | 17954 bytes | May 23 2025 10:34:44
beam.py | File | 5182 bytes | May 23 2025 10:34:44
boto3.py | File | 4411 bytes | May 23 2025 10:34:44
bottle.py | File | 6615 bytes | May 23 2025 10:34:44
chalice.py | File | 4699 bytes | May 23 2025 10:34:44
clickhouse_driver.py | File | 5247 bytes | May 23 2025 10:34:44
cloud_resource_context.py | File | 7780 bytes | May 23 2025 10:34:44
cohere.py | File | 9333 bytes | May 23 2025 10:34:44
dedupe.py | File | 1418 bytes | May 23 2025 10:34:44
dramatiq.py | File | 5583 bytes | May 23 2025 10:34:44
excepthook.py | File | 2408 bytes | May 23 2025 10:34:44
executing.py | File | 1994 bytes | May 23 2025 10:34:44
falcon.py | File | 9501 bytes | May 23 2025 10:34:44
fastapi.py | File | 4718 bytes | May 23 2025 10:34:44
flask.py | File | 8740 bytes | May 23 2025 10:34:44
gcp.py | File | 8274 bytes | May 23 2025 10:34:44
gnu_backtrace.py | File | 2894 bytes | May 23 2025 10:34:44
gql.py | File | 4179 bytes | May 23 2025 10:34:44
graphene.py | File | 5042 bytes | May 23 2025 10:34:44
httpx.py | File | 5866 bytes | May 23 2025 10:34:44
huey.py | File | 5443 bytes | May 23 2025 10:34:44
huggingface_hub.py | File | 6551 bytes | May 23 2025 10:34:44
langchain.py | File | 17718 bytes | May 23 2025 10:34:44
launchdarkly.py | File | 1935 bytes | May 23 2025 10:34:44
litestar.py | File | 11569 bytes | May 23 2025 10:34:44
logging.py | File | 13506 bytes | May 23 2025 10:34:44
loguru.py | File | 3620 bytes | May 23 2025 10:34:44
modules.py | File | 820 bytes | May 23 2025 10:34:44
openai.py | File | 15585 bytes | May 23 2025 10:34:44
openfeature.py | File | 1235 bytes | May 23 2025 10:34:44
pure_eval.py | File | 4581 bytes | May 23 2025 10:34:44
pymongo.py | File | 6380 bytes | May 23 2025 10:34:44
pyramid.py | File | 7364 bytes | May 23 2025 10:34:44
quart.py | File | 7437 bytes | May 23 2025 10:34:44
ray.py | File | 4162 bytes | May 23 2025 10:34:44
rq.py | File | 5307 bytes | May 23 2025 10:34:44
rust_tracing.py | File | 9078 bytes | May 23 2025 10:34:44
sanic.py | File | 12960 bytes | May 23 2025 10:34:44
serverless.py | File | 1804 bytes | May 23 2025 10:34:44
socket.py | File | 3169 bytes | May 23 2025 10:34:44
sqlalchemy.py | File | 4372 bytes | May 23 2025 10:34:44
starlette.py | File | 26413 bytes | May 23 2025 10:34:44
starlite.py | File | 10620 bytes | May 23 2025 10:34:44
statsig.py | File | 1227 bytes | May 23 2025 10:34:44
stdlib.py | File | 8831 bytes | May 23 2025 10:34:44
strawberry.py | File | 14126 bytes | May 23 2025 10:34:44
sys_exit.py | File | 2493 bytes | May 23 2025 10:34:44
threading.py | File | 5392 bytes | May 23 2025 10:34:44
tornado.py | File | 7222 bytes | May 23 2025 10:34:44
trytond.py | File | 1651 bytes | May 23 2025 10:34:44
typer.py | File | 1815 bytes | May 23 2025 10:34:44
unleash.py | File | 1058 bytes | May 23 2025 10:34:44
wsgi.py | File | 10747 bytes | May 23 2025 10:34:44
openai.py

from functools import wraps

import sentry_sdk
from sentry_sdk import consts
from sentry_sdk.ai.monitoring import record_token_usage
from sentry_sdk.ai.utils import set_data_normalized
from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import (
    capture_internal_exceptions,
    event_from_exception,
)

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Any, Iterable, List, Optional, Callable, AsyncIterator, Iterator
    from sentry_sdk.tracing import Span

try:
    from openai.resources.chat.completions import Completions, AsyncCompletions
    from openai.resources import Embeddings, AsyncEmbeddings

    if TYPE_CHECKING:
        from openai.types.chat import ChatCompletionMessageParam, ChatCompletionChunk
except ImportError:
    raise DidNotEnable("OpenAI not installed")


class OpenAIIntegration(Integration):
    identifier = "openai"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
        # type: (OpenAIIntegration, bool, Optional[str]) -> None
        self.include_prompts = include_prompts

        self.tiktoken_encoding = None
        if tiktoken_encoding_name is not None:
            import tiktoken  # type: ignore

            self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)

    @staticmethod
    def setup_once():
        # type: () -> None
        Completions.create = _wrap_chat_completion_create(Completions.create)
        Embeddings.create = _wrap_embeddings_create(Embeddings.create)

        AsyncCompletions.create = _wrap_async_chat_completion_create(
            AsyncCompletions.create
        )
        AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

    def count_tokens(self, s):
        # type: (OpenAIIntegration, str) -> int
        if self.tiktoken_encoding is not None:
            return len(self.tiktoken_encoding.encode_ordinary(s))
        return 0


def _capture_exception(exc):
    # type: (Any) -> None
    event, hint = event_from_exception(
        exc,
        client_options=sentry_sdk.get_client().options,
        mechanism={"type": "openai", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)


def _calculate_chat_completion_usage(
    messages, response, span, streaming_message_responses, count_tokens
):
    # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
    completion_tokens = 0  # type: Optional[int]
    prompt_tokens = 0  # type: Optional[int]
    total_tokens = 0  # type: Optional[int]
    if hasattr(response, "usage"):
        if hasattr(response.usage, "completion_tokens") and isinstance(
            response.usage.completion_tokens, int
        ):
            completion_tokens = response.usage.completion_tokens
        if hasattr(response.usage, "prompt_tokens") and isinstance(
            response.usage.prompt_tokens, int
        ):
            prompt_tokens = response.usage.prompt_tokens
        if hasattr(response.usage, "total_tokens") and isinstance(
            response.usage.total_tokens, int
        ):
            total_tokens = response.usage.total_tokens

    if prompt_tokens == 0:
        for message in messages:
            if "content" in message:
                prompt_tokens += count_tokens(message["content"])

    if completion_tokens == 0:
        if streaming_message_responses is not None:
            for message in streaming_message_responses:
                completion_tokens += count_tokens(message)
        elif hasattr(response, "choices"):
            for choice in response.choices:
                if hasattr(choice, "message"):
                    completion_tokens += count_tokens(choice.message)

    if prompt_tokens == 0:
        prompt_tokens = None
    if completion_tokens == 0:
        completion_tokens = None
    if total_tokens == 0:
        total_tokens = None
    record_token_usage(span, prompt_tokens, completion_tokens, total_tokens)


def _new_chat_completion_common(f, *args, **kwargs):
    # type: (Any, *Any, **Any) -> Any
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    if "messages" not in kwargs:
        # invalid call (in all versions of openai), let it return error
        return f(*args, **kwargs)

    try:
        iter(kwargs["messages"])
    except TypeError:
        # invalid call (in all versions), messages must be iterable
        return f(*args, **kwargs)

    kwargs["messages"] = list(kwargs["messages"])
    messages = kwargs["messages"]
    model = kwargs.get("model")
    streaming = kwargs.get("stream")

    span = sentry_sdk.start_span(
        op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
        name="Chat Completion",
        origin=OpenAIIntegration.origin,
    )
    span.__enter__()

    res = yield f, args, kwargs

    with capture_internal_exceptions():
        if should_send_default_pii() and integration.include_prompts:
            set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, messages)

        set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
        set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)

        if hasattr(res, "choices"):
            if should_send_default_pii() and integration.include_prompts:
                set_data_normalized(
                    span,
                    SPANDATA.AI_RESPONSES,
                    list(map(lambda x: x.message, res.choices)),
                )
            _calculate_chat_completion_usage(
                messages, res, span, None, integration.count_tokens
            )
            span.__exit__(None, None, None)
        elif hasattr(res, "_iterator"):
            data_buf: list[list[str]] = []  # one for each choice

            old_iterator = res._iterator

            def new_iterator():
                # type: () -> Iterator[ChatCompletionChunk]
                with capture_internal_exceptions():
                    for x in old_iterator:
                        if hasattr(x, "choices"):
                            choice_index = 0
                            for choice in x.choices:
                                if hasattr(choice, "delta") and hasattr(
                                    choice.delta, "content"
                                ):
                                    content = choice.delta.content
                                    if len(data_buf) <= choice_index:
                                        data_buf.append([])
                                    data_buf[choice_index].append(content or "")
                                choice_index += 1
                        yield x
                    if len(data_buf) > 0:
                        all_responses = list(
                            map(lambda chunk: "".join(chunk), data_buf)
                        )
                        if should_send_default_pii() and integration.include_prompts:
                            set_data_normalized(
                                span, SPANDATA.AI_RESPONSES, all_responses
                            )
                        _calculate_chat_completion_usage(
                            messages,
                            res,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
                span.__exit__(None, None, None)

            async def new_iterator_async():
                # type: () -> AsyncIterator[ChatCompletionChunk]
                with capture_internal_exceptions():
                    async for x in old_iterator:
                        if hasattr(x, "choices"):
                            choice_index = 0
                            for choice in x.choices:
                                if hasattr(choice, "delta") and hasattr(
                                    choice.delta, "content"
                                ):
                                    content = choice.delta.content
                                    if len(data_buf) <= choice_index:
                                        data_buf.append([])
                                    data_buf[choice_index].append(content or "")
                                choice_index += 1
                        yield x
                    if len(data_buf) > 0:
                        all_responses = list(
                            map(lambda chunk: "".join(chunk), data_buf)
                        )
                        if should_send_default_pii() and integration.include_prompts:
                            set_data_normalized(
                                span, SPANDATA.AI_RESPONSES, all_responses
                            )
                        _calculate_chat_completion_usage(
                            messages,
                            res,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
                span.__exit__(None, None, None)

            if str(type(res._iterator)) == "<class 'async_generator'>":
                res._iterator = new_iterator_async()
            else:
                res._iterator = new_iterator()
        else:
            set_data_normalized(span, "unknown_response", True)
            span.__exit__(None, None, None)

    return res


def _wrap_chat_completion_create(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    def _execute_sync(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_chat_completion_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return e.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None or "messages" not in kwargs:
            # no "messages" means invalid call (in all versions of openai), let it return error
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync


def _wrap_async_chat_completion_create(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    async def _execute_async(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_chat_completion_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return await e.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None or "messages" not in kwargs:
            # no "messages" means invalid call (in all versions of openai), let it return error
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_create_async


def _new_embeddings_create_common(f, *args, **kwargs):
    # type: (Any, *Any, **Any) -> Any
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    with sentry_sdk.start_span(
        op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
        description="OpenAI Embedding Creation",
        origin=OpenAIIntegration.origin,
    ) as span:
        if "input" in kwargs and (
            should_send_default_pii() and integration.include_prompts
        ):
            if isinstance(kwargs["input"], str):
                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
            elif (
                isinstance(kwargs["input"], list)
                and len(kwargs["input"]) > 0
                and isinstance(kwargs["input"][0], str)
            ):
                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
        if "model" in kwargs:
            set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])

        response = yield f, args, kwargs

        prompt_tokens = 0
        total_tokens = 0
        if hasattr(response, "usage"):
            if hasattr(response.usage, "prompt_tokens") and isinstance(
                response.usage.prompt_tokens, int
            ):
                prompt_tokens = response.usage.prompt_tokens
            if hasattr(response.usage, "total_tokens") and isinstance(
                response.usage.total_tokens, int
            ):
                total_tokens = response.usage.total_tokens

        if prompt_tokens == 0:
            prompt_tokens = integration.count_tokens(kwargs["input"] or "")

        record_token_usage(span, prompt_tokens, None, total_tokens or prompt_tokens)

        return response


def _wrap_embeddings_create(f):
    # type: (Any) -> Any
    def _execute_sync(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_embeddings_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return e.value

        try:
            try:
                result = f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return f(*args, **kwargs)

        return _execute_sync(f, *args, **kwargs)

    return _sentry_patched_create_sync


def _wrap_async_embeddings_create(f):
    # type: (Any) -> Any
    async def _execute_async(f, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        gen = _new_embeddings_create_common(f, *args, **kwargs)

        try:
            f, args, kwargs = next(gen)
        except StopIteration as e:
            return await e.value

        try:
            try:
                result = await f(*args, **kwargs)
            except Exception as e:
                _capture_exception(e)
                raise e from None

            return gen.send(result)
        except StopIteration as e:
            return e.value

    @wraps(f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
        if integration is None:
            return await f(*args, **kwargs)

        return await _execute_async(f, *args, **kwargs)

    return _sentry_patched_create_async
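Note how the sync and async wrappers above share one instrumentation path: the common logic is a generator that yields the wrapped callable back to the wrapper and receives the call's result via send(), so span setup and teardown are written once. A stripped-down sketch of that pattern follows; every name in it is illustrative and not part of the SDK.

# Minimal sketch of the "shared generator" wrapping pattern (illustrative names).
def should_instrument():
    # Stand-in for the real check against the Sentry client/integration state.
    return True

def _common(f, *args, **kwargs):
    if not should_instrument():
        # A plain `return` in a generator surfaces as StopIteration.value below.
        return f(*args, **kwargs)
    print("before call")            # stands in for span setup
    result = yield f, args, kwargs  # hand the actual call back to the wrapper
    print("after call")             # stands in for span annotation/teardown
    return result

def _wrap_sync(f):
    def wrapper(*args, **kwargs):
        gen = _common(f, *args, **kwargs)
        try:
            f2, a, kw = next(gen)          # run the code before the yield
        except StopIteration as e:
            return e.value                 # common path bailed out early
        try:
            return gen.send(f2(*a, **kw))  # resume after the yield with the result
        except StopIteration as e:
            return e.value                 # the generator's return value
    return wrapper

# wrapped = _wrap_sync(lambda x: x * 2); wrapped(21) prints the two markers
# and returns 42. An async wrapper differs only in awaiting the call.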
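For reference, a minimal sketch of enabling this integration from application code, assuming a standard sentry_sdk.init() setup; the DSN below is a placeholder.

import sentry_sdk
from sentry_sdk.integrations.openai import OpenAIIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    # Prompts/responses are attached only when PII sending is enabled
    # and include_prompts is True (its default).
    send_default_pii=True,
    traces_sample_rate=1.0,
    integrations=[
        OpenAIIntegration(
            include_prompts=True,
            # Optional: a tiktoken encoding name lets count_tokens() estimate
            # usage when the API response carries no usage data.
            tiktoken_encoding_name="cl100k_base",
        )
    ],
)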