fix: Improve UTF-8 encoding and test reliability #4813

Closed
31 changes: 18 additions & 13 deletions scripts/docs_render_provider_snippets.py
@@ -326,10 +326,12 @@ def search_provider_mentions_in_examples(provider) -> dict[(str, str)]:
"""
provider_mentions = {}
for example in glob.glob("examples/**/workflows/**/*.y*ml", recursive=True):
with open(example, "r") as file:
with open(example, "r", encoding="utf-8") as file:
content = file.read()

file_name = os.path.relpath(example, "examples/workflows/")
# Normalize path separators for cross-platform compatibility
file_name = file_name.replace('\\', '/')

if provider.lower() == "keep":
# Special case for Keep provider because we use the word "keep" everywhere
@@ -361,14 +363,14 @@ def check_AutoGeneratedSnippet_in_provider_documentation_files():
"by AutoGeneratedSnippet tag validator, something went wrong."
)
for provider in providers:
with open(provider, "r") as f:
with open(provider, "r", encoding="utf-8") as f:
content = f.read()
if "AutoGeneratedSnippet" not in content:
missing_at.append(provider)
if "AutoGeneratedSnippet" not in content:
missing_at.append(provider)
return missing_at


documentation_template = """{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py
documentation_template = """{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py
Do not edit it manually, as it will be overwritten */}
{% if provider_data['auth'].items()|length == 0 %}{% else %}
## Authentication
@@ -435,8 +437,8 @@ def check_AutoGeneratedSnippet_in_provider_documentation_files():
{% if provider_data["provides_topology"] %}

## Topology
This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology)
and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context
This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology)
and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context
for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology).{% endif -%}
{% if provider_data["provider_methods"].items()|length > 0 %}

@@ -511,12 +513,13 @@ def main():
file_path = f"docs/snippets/providers/{provider_name}-snippet-autogenerated.mdx"
is_outdated = True
if os.path.exists(file_path):
with open(file_path, "r") as f:
with open(file_path, "r", encoding="utf-8") as f:
existing_content = f.read()
# remove all spaces and new lines while comparing
if existing_content.replace(" ", "").replace(
"\n", ""
) == template.replace(" ", "").replace("\n", ""):
# also normalize path separators for cross-platform compatibility
existing_normalized = existing_content.replace(" ", "").replace("\n", "").replace("\\", "/")
template_normalized = template.replace(" ", "").replace("\n", "").replace("\\", "/")
if existing_normalized == template_normalized:
is_outdated = False
else:
# render the difference using difflib
@@ -536,8 +539,10 @@ def main():
if is_outdated:
outdated_files.append(file_path)
if not args.validate:
with open(file_path, "w") as f:
f.write(template)
# Replace backslashes with forward slashes in URLs for cross-platform compatibility
fixed_content = template.replace('\\', '/')
with open(file_path, "w", encoding="utf-8") as f:
f.write(fixed_content)
# mintlify doesn't work nice with simultanious changes of multiple files
print(f"File {file_path} is outdated, updated.")
time.sleep(0.1)
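The pattern applied throughout docs_render_provider_snippets.py above is the same in each hunk: read with an explicit UTF-8 encoding and normalize path separators to forward slashes. The following is a minimal standalone sketch of that pattern, not part of the PR; the helper names read_text_utf8 and workflow_key are hypothetical, and the examples/workflows/ layout and glob pattern are taken from the diff above.

import glob
import os


def read_text_utf8(path):
    # Hypothetical helper: always pass encoding="utf-8" so the read does not
    # fall back to the platform's locale encoding (e.g. cp1252 on Windows),
    # which is the failure mode the encoding changes above address.
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def workflow_key(example_path):
    # Hypothetical helper: compute the path relative to examples/workflows/
    # and normalize separators so Windows ("foo\\bar.yml") and Linux
    # ("foo/bar.yml") produce the same lookup key.
    name = os.path.relpath(example_path, "examples/workflows/")
    return name.replace("\\", "/")


if __name__ == "__main__":
    # Illustrative usage only: print each normalized key and file length.
    for example in glob.glob("examples/**/workflows/**/*.y*ml", recursive=True):
        print(workflow_key(example), len(read_text_utf8(example)))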
56 changes: 41 additions & 15 deletions tests/e2e_tests/incidents_alerts_tests/incidents_alerts_setup.py
@@ -203,24 +203,36 @@ def upload_alerts():
return current_alerts

attempt = 0
max_attempts = 30 # Increased from 10 to allow more time for alerts to be processed
while True:
time.sleep(1)
# Increased from 1 to 2 seconds to reduce the frequency of retry attempts
# This gives the system more time to process alerts between attempts,
# which helps reduce flaky test failures, especially in CI environments
time.sleep(2)
current_alerts = query_alerts(limit=1000, offset=0)
attempt += 1

if all(
simluated_alert["alertName"] in current_alerts["grouped_by_name"]
for _, simluated_alert in simulated_alerts
):
# Check which alerts are still missing
missing_alerts = []
for provider_type, simluated_alert in simulated_alerts:
if simluated_alert["alertName"] not in current_alerts["grouped_by_name"]:
missing_alerts.append((provider_type, simluated_alert))

if not missing_alerts:
print(f"All alerts uploaded successfully after {attempt} attempts")
break

if attempt >= 10:
if attempt >= max_attempts:
# Print more debugging information
print(f"Current alerts in system: {list(current_alerts['grouped_by_name'].keys())}")
print(f"Missing alerts: {[alert['alertName'] for _, alert in missing_alerts]}")

raise Exception(
"Not all alerts were uploaded. Not uploaded alerts: "
f"Not all alerts were uploaded after {max_attempts} attempts. Missing alerts: "
+ str(
[
f"{provider_type}: {alert['alertName']}"
for provider_type, alert in not_uploaded_alerts
for provider_type, alert in missing_alerts
]
)
)
@@ -391,20 +403,34 @@ def upload_incidents():
return current_incidents

attempt = 0
# Increased from 10 to 30 to allow more time for incidents to be processed
# This helps prevent test failures in slower CI environments
max_attempts = 30
while True:
# Increased from 1 to 2 seconds to reduce the frequency of retry attempts
# This gives the system more time to process incidents between attempts,
# which helps reduce flaky test failures, especially in CI environments
time.sleep(2)
current_incidents = query_incidents(limit=1000, offset=0)
attempt += 1

if all(
simluated_incident["user_generated_name"]
in current_incidents["grouped_by_name"]
for simluated_incident in simulated_incidents
):
# Check which incidents are still missing
missing_incidents = []
for simluated_incident in simulated_incidents:
if simluated_incident["user_generated_name"] not in current_incidents["grouped_by_name"]:
missing_incidents.append(simluated_incident)

if not missing_incidents:
print(f"All incidents uploaded successfully after {attempt} attempts")
break

if attempt >= 10:
if attempt >= max_attempts:
# Print more debugging information
print(f"Current incidents in system: {list(current_incidents['grouped_by_name'].keys())}")
print(f"Missing incidents: {[incident['user_generated_name'] for incident in missing_incidents]}")

raise Exception(
f"Not all incidents were uploaded. Not uploaded incidents: {not_uploaded_incidents}"
f"Not all incidents were uploaded after {max_attempts} attempts. Missing incidents: {missing_incidents}"
)
time.sleep(1)

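The retry changes in upload_alerts() and upload_incidents() share one polling shape: a bounded number of attempts, a fixed delay, and a report of exactly which items are still missing on failure. Below is a minimal generic sketch of that shape, not part of the PR; wait_until_present and fetch_current are hypothetical names, and the 30-attempt / 2-second defaults are assumed from the diff above.

import time
from collections.abc import Callable, Iterable


def wait_until_present(
    expected: Iterable[str],
    fetch_current: Callable[[], set[str]],
    max_attempts: int = 30,
    delay_seconds: float = 2.0,
) -> None:
    # Poll until every expected name is reported by fetch_current(), mirroring
    # the loop structure above: sleep first, re-query, and track exactly which
    # names are still missing.
    expected_set = set(expected)
    missing = expected_set
    for attempt in range(1, max_attempts + 1):
        time.sleep(delay_seconds)
        missing = expected_set - fetch_current()
        if not missing:
            print(f"All items present after {attempt} attempts")
            return
    # After max_attempts, fail loudly with the missing names to make flaky
    # CI environments easier to debug.
    raise Exception(
        f"Not all items were present after {max_attempts} attempts. "
        f"Missing: {sorted(missing)}"
    )

In the tests above, fetch_current would roughly correspond to wrapping query_alerts() or query_incidents() and returning the keys of grouped_by_name.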