logger = logging.getLogger(__name__)


-class CloudWatchEMFExporter(MetricExporter):
+class AwsCloudWatchEMFExporter(MetricExporter):
    """
    OpenTelemetry metrics exporter for CloudWatch EMF format.

    This exporter converts OTel metrics into CloudWatch EMF logs which are then
    sent to CloudWatch Logs. CloudWatch Logs automatically extracts the metrics
    from the EMF logs.
+
+    https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html
+
    """

    # OTel to CloudWatch unit mapping
+    # Ref: opentelemetry-collector-contrib/blob/main/exporter/awsemfexporter/grouped_metric.go#L188
    UNIT_MAPPING = {
+        "1": "",
+        "ns": "",
        "ms": "Milliseconds",
        "s": "Seconds",
        "us": "Microseconds",
-        "ns": "Nanoseconds",
        "By": "Bytes",
-        "KiBy": "Kilobytes",
-        "MiBy": "Megabytes",
-        "GiBy": "Gigabytes",
-        "TiBy": "Terabytes",
-        "Bi": "Bits",
-        "KiBi": "Kilobits",
-        "MiBi": "Megabits",
-        "GiBi": "Gigabits",
-        "TiBi": "Terabits",
-        "%": "Percent",
-        "1": "Count",
-        "{count}": "Count",
+        "bit": "Bits",
    }
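The mapping is applied as a plain dict lookup. The sketch below illustrates the intended semantics; the helper name and exact fallback behavior are assumptions, not confirmed by this diff (an empty string in the table is taken to mean "omit the Unit field").

```python
from typing import Optional

# Subset of the table above, for illustration only.
UNIT_MAPPING = {"1": "", "ns": "", "ms": "Milliseconds", "s": "Seconds",
                "us": "Microseconds", "By": "Bytes", "bit": "Bits"}


def to_cloudwatch_unit(otel_unit: Optional[str]) -> Optional[str]:
    """Return a CloudWatch unit name, or None when no Unit field should be emitted."""
    if not otel_unit:
        return None
    mapped = UNIT_MAPPING.get(otel_unit, otel_unit)
    return mapped or None  # "" (e.g. "1", "ns") -> omit Unit


assert to_cloudwatch_unit("ms") == "Milliseconds"
assert to_cloudwatch_unit("1") is None  # dimensionless counts carry no CloudWatch unit
```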

    def __init__(
@@ -102,6 +96,9 @@ def __init__(
        # Ensure log group exists
        self._ensure_log_group_exists()

+        # Ensure log stream exists
+        self._ensure_log_stream_exists()
+
    def _generate_log_stream_name(self) -> str:
        """Generate a unique log stream name."""
@@ -120,6 +117,17 @@ def _ensure_log_group_exists(self):
                logger.error("Failed to create log group %s : %s", self.log_group_name, error)
                raise

+    def _ensure_log_stream_exists(self):
+        try:
+            self.logs_client.create_log_stream(logGroupName=self.log_group_name, logStreamName=self.log_stream_name)
+            logger.info("Created log stream: %s", self.log_stream_name)
+        except ClientError as error:
+            if error.response.get("Error", {}).get("Code") == "ResourceAlreadyExistsException":
+                logger.debug("Log stream %s already exists", self.log_stream_name)
+            else:
+                logger.error("Failed to create log stream %s : %s", self.log_stream_name, error)
+                raise
+
    def _get_metric_name(self, record: Any) -> Optional[str]:
        """Get the metric name from the metric record or data point."""
        # For compatibility with older record format
@@ -250,20 +258,23 @@ def _create_emf_log(self, metric_records: List[Any], resource: Resource, timesta
        emf_log["Version"] = "1"

        # Add resource attributes to EMF log but not as dimensions
+
        if resource and resource.attributes:
            for key, value in resource.attributes.items():
-                emf_log[f"resource.{key}"] = str(value)
+                emf_log[f"otel.resource.{key}"] = str(value)

        # Initialize collections for dimensions and metrics
-        all_attributes = {}
+
        metric_definitions = []

+        # Collect attributes from all records (they should be the same for all records in the group)
+        # Only collect once from the first record and apply to all records
+        all_attributes = (metric_records[0].attributes
+                          if metric_records and hasattr(metric_records[0], "attributes") and metric_records[0].attributes
+                          else {})
+
        # Process each metric record
        for record in metric_records:
-            # Collect attributes from all records (they should be the same for all records in the group)
-            if hasattr(record, "attributes") and record.attributes:
-                for key, value in record.attributes.items():
-                    all_attributes[key] = value

            metric_name = self._get_metric_name(record)
@@ -279,9 +290,12 @@ def _create_emf_log(self, metric_records: List[Any], resource: Resource, timesta
                metric_data["Unit"] = unit

            # Process gauge metrics (only type supported in PR 1)
-            if hasattr(record, "value"):
-                # Store value directly in emf_log
-                emf_log[metric_name] = record.value
+            if not hasattr(record, "value"):
+                logger.debug("Skipping metric %s as it does not have valid metric value", metric_name)
+                continue
+
+            # Store value directly in emf_log
+            emf_log[metric_name] = record.value

            # Add to metric definitions list
            metric_definitions.append({"Name": metric_name, **metric_data})
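For context, the pieces collected here map onto the EMF envelope roughly as follows. The remainder of `_create_emf_log` is not shown in this diff, so this is only a sketch of a plausible assembly step; the helper name and the use of the exporter's namespace are assumptions.

```python
from typing import Any, Dict, List


def build_emf_envelope(namespace: str, timestamp_ms: int,
                       all_attributes: Dict[str, Any],
                       metric_definitions: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Assemble the _aws envelope plus top-level dimension values (illustrative only)."""
    envelope: Dict[str, Any] = {
        "_aws": {
            "Timestamp": timestamp_ms,  # epoch milliseconds shared by the whole group
            "CloudWatchMetrics": [
                {
                    "Namespace": namespace,
                    # Data-point attribute keys become the dimension set...
                    "Dimensions": [list(all_attributes.keys())] if all_attributes else [],
                    "Metrics": metric_definitions,  # [{"Name": ..., "Unit": ...}, ...]
                }
            ],
        }
    }
    # ...and each dimension value is emitted as a top-level field of the log line.
    for key, value in all_attributes.items():
        envelope[key] = str(value)
    return envelope
```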
@@ -307,6 +320,8 @@ def _send_log_event(self, log_event: Dict[str, Any]):
        Send a log event to CloudWatch Logs.

        Basic implementation for PR 1 - sends individual events directly.
+
+        TODO: Batch events and follow CloudWatch Logs quota constraints - number of events & size limit per payload
        """
        try:
            # Send the log event
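The batching TODO above would have to respect the documented PutLogEvents limits: at most 10,000 events per call, a maximum batch size of 1,048,576 bytes where each event counts as its UTF-8 message size plus 26 bytes, events in chronological order, and no batch spanning more than 24 hours. A minimal chunking sketch is shown below; it is illustrative only and not part of this change.

```python
# Illustrative batching sketch (not part of this diff).
MAX_BATCH_EVENTS = 10_000
MAX_BATCH_BYTES = 1_048_576
PER_EVENT_OVERHEAD = 26  # bytes CloudWatch Logs adds per event when sizing a batch


def chunk_log_events(events):
    """Yield lists of {"timestamp": ..., "message": ...} events that each fit the quotas."""
    batch, batch_bytes = [], 0
    for event in events:
        size = len(event["message"].encode("utf-8")) + PER_EVENT_OVERHEAD
        if batch and (len(batch) >= MAX_BATCH_EVENTS or batch_bytes + size > MAX_BATCH_BYTES):
            yield batch
            batch, batch_bytes = [], 0
        batch.append(event)
        batch_bytes += size
    if batch:
        yield batch
```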
@@ -354,15 +369,16 @@ def export(
                if not (hasattr(metric, "data") and hasattr(metric.data, "data_points")):
                    continue

-                # Process only Gauge metrics in PR 1
-                if isinstance(metric.data, Gauge):
+                # Process metrics based on type
+                metric_type = type(metric.data)
+                if metric_type == Gauge:
                    for dp in metric.data.data_points:
                        record, timestamp_ms = self._convert_gauge(metric, dp)
                        grouped_metrics[self._group_by_attributes_and_timestamp(record, timestamp_ms)].append(
                            record
                        )
                else:
-                    logger.warning("Unsupported Metric Type: %s", type(metric.data))
+                    logger.debug("Unsupported Metric Type: %s", metric_type)

        # Now process each group separately to create one EMF log per group
        for (_, timestamp_ms), metric_records in grouped_metrics.items():
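`_group_by_attributes_and_timestamp` itself is not shown in this diff; a plausible implementation builds a hashable key from the data point's attributes plus its timestamp, along the lines of this hypothetical sketch:

```python
from typing import Any, Dict, FrozenSet, Tuple


def group_key(record: Any, timestamp_ms: int) -> Tuple[FrozenSet, int]:
    """Hashable, order-independent key over the record's attributes and its timestamp."""
    attributes: Dict[str, Any] = getattr(record, "attributes", None) or {}
    return frozenset(attributes.items()), timestamp_ms
```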
@@ -390,27 +406,31 @@ def force_flush(self, timeout_millis: int = 10000) -> bool:
        """
        Force flush any pending metrics.

+        TODO: add logic to handle graceful shutdown
+
        Args:
            timeout_millis: Timeout in milliseconds

        Returns:
            True if successful, False otherwise
        """
-        logger.debug("CloudWatchEMFExporter force flushes the buffered metrics")
+        logger.debug("AwsCloudWatchEMFExporter force flushes the buffered metrics")
        return True

    def shutdown(self, timeout_millis: Optional[int] = None, **kwargs: Any) -> bool:
        """
        Shutdown the exporter.
        Override to handle timeout and other keyword arguments, but do nothing.

+        TODO: add logic to handle graceful shutdown
+
        Args:
            timeout_millis: Ignored timeout in milliseconds
            **kwargs: Ignored additional keyword arguments
        """
        # Intentionally do nothing
        self.force_flush(timeout_millis)
-        logger.debug("CloudWatchEMFExporter shutdown called with timeout_millis=%s", timeout_millis)
+        logger.debug("AwsCloudWatchEMFExporter shutdown called with timeout_millis=%s", timeout_millis)
        return True

@@ -420,7 +440,7 @@ def create_emf_exporter(
    log_stream_name: Optional[str] = None,
    aws_region: Optional[str] = None,
    **kwargs,
-) -> CloudWatchEMFExporter:
+) -> AwsCloudWatchEMFExporter:
    """
    Convenience function to create a CloudWatch EMF exporter with DELTA temporality.

@@ -430,10 +450,10 @@ def create_emf_exporter(
        log_stream_name: CloudWatch log stream name (auto-generated if None)
        aws_region: AWS region (auto-detected if None)
        debug: Whether to enable debug printing of EMF logs
-        **kwargs: Additional arguments passed to the CloudWatchEMFExporter
+        **kwargs: Additional arguments passed to the AwsCloudWatchEMFExporter

    Returns:
-        Configured CloudWatchEMFExporter instance
+        Configured AwsCloudWatchEMFExporter instance
    """

    # Set up temporality preference - always use DELTA for CloudWatch
@@ -447,7 +467,7 @@ def create_emf_exporter(
    }

    # Create and return the exporter
-    return CloudWatchEMFExporter(
+    return AwsCloudWatchEMFExporter(
        namespace=namespace,
        log_group_name=log_group_name,
        log_stream_name=log_stream_name,
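For reference, wiring the convenience factory into the OpenTelemetry SDK would look roughly like the sketch below. This is illustrative and not part of the diff; it assumes `create_emf_exporter` accepts the `namespace` and `log_group_name` keywords shown above, and it uses an observable gauge since gauges are the only metric type handled in PR 1.

```python
from opentelemetry import metrics
from opentelemetry.metrics import CallbackOptions, Observation
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

# DELTA temporality is configured inside create_emf_exporter, per its docstring.
exporter = create_emf_exporter(
    namespace="MyApplication",
    log_group_name="/metrics/my-application",
)
reader = PeriodicExportingMetricReader(exporter, export_interval_millis=60_000)
metrics.set_meter_provider(MeterProvider(metric_readers=[reader]))

meter = metrics.get_meter("my.service")


def _read_queue_depth(options: CallbackOptions):
    # Each observation becomes a gauge data point with its attributes as dimensions.
    yield Observation(42, {"queue.name": "default"})


meter.create_observable_gauge("queue.depth", callbacks=[_read_queue_depth], unit="1")
```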