# aws_metrics_constants.py
  1. """
  2. Copyright (c) Contributors to the Open 3D Engine Project.
  3. For complete copyright and license terms please see the LICENSE at the root of this distribution.
  4. SPDX-License-Identifier: Apache-2.0 OR MIT
  5. """
  6. # Constants for API Gateway Service API
  7. APIGATEWAY_STAGE = "api"
  8. # Constants for Kinesis Data Analytics application
  9. # The SQL code for the Kinesis analytics application.
  10. KINESIS_APPLICATION_CODE = "-- ** Continuous Filter **\n"\
  11. "CREATE OR REPLACE STREAM \"DESTINATION_STREAM\" (\n"\
  12. "METRIC_NAME VARCHAR(1024),\n"\
  13. "METRIC_TIMESTAMP BIGINT,\n"\
  14. "METRIC_UNIT_VALUE_INT BIGINT,\n"\
  15. "METRIC_UNIT VARCHAR(1024),\n"\
  16. "OUTPUT_TYPE VARCHAR(1024));\n"\
  17. "CREATE OR REPLACE PUMP \"LOGIN_PUMP\" AS\n"\
  18. "INSERT INTO \"DESTINATION_STREAM\" (METRIC_NAME, METRIC_TIMESTAMP, METRIC_UNIT_VALUE_INT, METRIC_UNIT, OUTPUT_TYPE)\n"\
  19. "SELECT STREAM 'TotalLogins', UNIX_TIMESTAMP(TIME_WINDOW), COUNT(distinct_stream.login_count) AS unique_count, 'Count', 'metrics'\n"\
  20. "FROM (\n"\
  21. " SELECT STREAM DISTINCT\n"\
  22. " ROWTIME as window_time,\n"\
  23. " \"AnalyticsApp_001\".\"event_id\" as login_count,\n"\
  24. " STEP(\"AnalyticsApp_001\".ROWTIME BY INTERVAL '1' MINUTE) as TIME_WINDOW\n"\
  25. " FROM \"AnalyticsApp_001\"\n"\
  26. " WHERE \"AnalyticsApp_001\".\"event_name\" = 'login'\n"\
  27. ") as distinct_stream\n"\
  28. "GROUP BY\n"\
  29. " TIME_WINDOW,\n"\
  30. " STEP(distinct_stream.window_time BY INTERVAL '1' MINUTE);\n"
  31. # Constants for the analytics processing and events processing lambda.
  32. LAMBDA_TIMEOUT_IN_MINUTES = 5
  33. # The amount of memory available to the function at runtime. Range from 128 to 10240.
  34. # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html#cfn-lambda-function-memorysize
  35. LAMBDA_MEMORY_SIZE_IN_MB: int = 256
  36. # Constants for the Glue database and table.
  37. GLUE_TABLE_NAME = 'firehose_events'
  38. # Input/output format and serialization library for the Glue table (Same as the Game Analytics Pipeline solution).
  39. # The Firehose delivery stream will use this table to convert the metrics data to the parquet format.
  40. # Check https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html for converting
  41. # the input record format in Kinesis Data Firehose.
  42. GLUE_TABLE_INPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
  43. GLUE_TABLE_OUTPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
  44. GLUE_TABLE_SERIALIZATION_LIBRARY = 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
  45. GLUE_TABLE_SERIALIZATION_LIBRARY_SERIALIZATION_FORMAT = '1'
  46. # Constants for the Glue crawler.
  47. CRAWLER_CONFIGURATION = '{'\
  48. ' \"Version\":1.0,'\
  49. ' \"CrawlerOutput\":'\
  50. ' {'\
  51. ' \"Partitions\":'\
  52. ' {'\
  53. ' \"AddOrUpdateBehavior\":\"InheritFromTable\"'\
  54. ' },'\
  55. ' \"Tables\":'\
  56. ' {'\
  57. ' \"AddOrUpdateBehavior\":\"MergeNewColumns\"'\
  58. ' }'\
  59. ' }'\
  60. '}'\
  61. # Constants for the Kinesis Data Firehose delivery stream.
  62. # Hints for the buffering to perform before delivering data to the destination.
  63. # These options are treated as hints, and therefore Kinesis Data Firehose might choose to
  64. # use different values when it is optimal.
  65. DELIVERY_STREAM_BUFFER_HINTS_INTERVAL_IN_SECONDS = 60
  66. DELIVERY_STREAM_BUFFER_HINTS_SIZE_IN_MBS = 128
  67. # Configuration for the destination S3 bucket.
  68. S3_DESTINATION_PREFIX = GLUE_TABLE_NAME + '/year=!{timestamp:YYYY}/month=!{timestamp:MM}/day=!{timestamp:dd}/'
  69. S3_DESTINATION_ERROR_OUTPUT_PREFIX = 'firehose_errors/year=!{timestamp:YYYY}/month=!{timestamp:MM}/' \
  70. 'day=!{timestamp:dd}/!{firehose:error-output-type}'
  71. # Parquet format is already compressed with SNAPPY
  72. S3_COMPRESSION_FORMAT = 'UNCOMPRESSED'
  73. # Configuration for the data processor for an Amazon Kinesis Data Firehose delivery stream.
  74. # Set the length of time that Data Firehose buffers incoming data before delivering it to the destination. Valid Range:
  75. # Minimum value of 60. Maximum value of 900.
  76. # https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html
  77. PROCESSOR_BUFFER_INTERVAL_IN_SECONDS = '60'
  78. # Buffer incoming data to the specified size before delivering it to the destination. Recommend setting this
  79. # parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds.
  80. # https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html
  81. PROCESSOR_BUFFER_SIZE_IN_MBS = '3'
  82. # Number of retries for delivering the data to S3.
  83. # Minimum value of 1. Maximum value of 512.
  84. # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html
  85. PROCESSOR_BUFFER_NUM_OF_RETRIES = '3'
  86. PARQUET_SER_DE_COMPRESSION = 'SNAPPY'
  87. # Constants for the Athena engine
  88. # Directory name in S3 for the Athena query outputs
  89. ATHENA_OUTPUT_DIRECTORY = 'athena_query_results'
  90. # Constants for the CloudWatch dashboard
  91. # The start of the time range to use for each widget on the dashboard. Set a 15 minutes view.
  92. DASHBOARD_TIME_RANGE_START = '-PT15M'
  93. # The maximum amount of horizontal grid units the widget will take up.
  94. DASHBOARD_MAX_WIDGET_WIDTH = 24
  95. # The amount of vertical grid units the global description widget will take up.
  96. DASHBOARD_GLOBAL_DESCRIPTION_WIDGET_HEIGHT = 3
  97. # The time period used for metric data aggregations.
  98. DASHBOARD_METRICS_TIME_PERIOD = 1
  99. # The global description for the CloudWatch dashboard
  100. DASHBOARD_GLOBAL_DESCRIPTION = "# Metrics Dashboard \n"\
  101. "This dashboard contains near-real-time metrics sent from your client"\
  102. " or dedicated server. \n You can edit the widgets using the AWS console"\
  103. " or modify your CDK application code. Please note that redeploying"\
  104. " the CDK application will overwrite any changes you made directly"\
  105. " via the AWS console. \n For more information about using the AWS Metrics Gem"\
  106. " and CDK application, please check the AWSMetrics gem document."
  107. # The description for the operational health shown on the CloudWatch dashboard
  108. DASHBOARD_OPERATIONAL_HEALTH_DESCRIPTION = "## Operational Health \n"\
  109. "This section covers operational metrics for the data analytics pipeline "\
  110. "during metrics event ingestion, processing and analytics."
  111. # The description for the real time analytics shown on the CloudWatch dashboard
  112. DASHBOARD_REAL_TIME_ANALYTICS_DESCRIPTION = "## Real-time Streaming Analytics \n"\
  113. "This section covers real-time analytics metrics sent " \
  114. "to the data analytics pipeline."