
Commit 73af855

Authored Feb 18, 2025
Merge pull request #11848 from DefectDojo/release/2.43.3
Release: Merge release into master from: release/2.43.3
2 parents 31f0be8 + ba42d83, commit 73af855

15 files changed: 752 additions, 72 deletions

components/package.json
Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 {
   "name": "defectdojo",
-  "version": "2.43.2",
+  "version": "2.43.3",
   "license" : "BSD-3-Clause",
   "private": true,
   "dependencies": {

dojo/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -4,6 +4,6 @@
 # Django starts so that shared_task will use this app.
 from .celery import app as celery_app  # noqa: F401

-__version__ = "2.43.2"
+__version__ = "2.43.3"
 __url__ = "https://github.com/DefectDojo/django-DefectDojo"
 __docs__ = "https://documentation.defectdojo.com"

dojo/api_v2/serializers.py
Lines changed: 26 additions & 27 deletions

@@ -167,7 +167,7 @@ class ImportStatisticsSerializer(serializers.Serializer):
     )
     delta = DeltaStatisticsSerializer(
         required=False,
-        help_text="Finding statistics of modifications made by the reimport. Only available when TRACK_IMPORT_HISTORY hass not disabled.",
+        help_text="Finding statistics of modifications made by the reimport. Only available when TRACK_IMPORT_HISTORY has not been disabled.",
     )
     after = SeverityStatusStatisticsSerializer(
         help_text="Finding statistics as stored in Defect Dojo after the import",

@@ -1920,43 +1920,42 @@ class Meta:

     # Overriding this to push add Push to JIRA functionality
     def create(self, validated_data):
-        # remove tags from validated data and store them seperately
+        # Pop off of some fields that should not be sent to the model at this time
         to_be_tagged, validated_data = self._pop_tags(validated_data)
-
-        # pop push_to_jira so it won't get send to the model as a field
-        push_to_jira = validated_data.pop("push_to_jira")
-
-        # Save vulnerability ids and pop them
-        if "vulnerability_id_set" in validated_data:
-            vulnerability_id_set = validated_data.pop("vulnerability_id_set")
-        else:
-            vulnerability_id_set = None
-
-        # first save, so we have an instance to get push_all_to_jira from
-        new_finding = super(TaggitSerializer, self).create(validated_data)
-
-        if vulnerability_id_set:
-            vulnerability_ids = []
-            for vulnerability_id in vulnerability_id_set:
-                vulnerability_ids.append(vulnerability_id["vulnerability_id"])
-            validated_data["cve"] = vulnerability_ids[0]
-            save_vulnerability_ids(new_finding, vulnerability_ids)
-            new_finding.save()
-
+        push_to_jira = validated_data.pop("push_to_jira", False)
+        notes = validated_data.pop("notes", None)
+        found_by = validated_data.pop("found_by", None)
+        reviewers = validated_data.pop("reviewers", None)
+        # Process the vulnerability IDs specially
+        parsed_vulnerability_ids = []
+        if (vulnerability_ids := validated_data.pop("vulnerability_id_set", None)):
+            for vulnerability_id in vulnerability_ids:
+                parsed_vulnerability_ids.append(vulnerability_id["vulnerability_id"])
+            validated_data["cve"] = parsed_vulnerability_ids[0]
+        # Create a findings in memory so that we have access to unsaved_vulnerability_ids
+        new_finding = Finding(**validated_data)
+        new_finding.unsaved_vulnerability_ids = parsed_vulnerability_ids
+        new_finding.save()
+        # Deal with all of the many to many things
+        if notes:
+            new_finding.notes.set(notes)
+        if found_by:
+            new_finding.found_by.set(found_by)
+        if reviewers:
+            new_finding.reviewers.set(reviewers)
+        if parsed_vulnerability_ids:
+            save_vulnerability_ids(new_finding, parsed_vulnerability_ids)
         # TODO: JIRA can we remove this is_push_all_issues, already checked in
         # apiv2 viewset?
         push_to_jira = push_to_jira or jira_helper.is_push_all_issues(
             new_finding,
         )
-
         # If we need to push to JIRA, an extra save call is needed.
         # TODO: try to combine create and save, but for now I'm just fixing a
         # bug and don't want to change to much
         if push_to_jira or new_finding:
             new_finding.save(push_to_jira=push_to_jira)
-
-        # not sure why we are returning a tag_object, but don't want to change
-        # too much now as we're just fixing a bug
+        # This final call will save the finding again and return it
         return self._save_tags(new_finding, to_be_tagged)

     def validate(self, data):
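The rewritten create() above follows the standard Django pattern for many-to-many data: relations such as notes, found_by, and reviewers can only be attached with .set() after the instance has been saved and has a primary key, so they are popped from validated_data before Finding(**validated_data) is built. A minimal self-contained sketch of the same pattern, using hypothetical models rather than DefectDojo's own:

# Sketch only: assumes a configured Django project (models need an app registry).
from django.db import models

class Tag(models.Model):
    name = models.CharField(max_length=50)

class Article(models.Model):
    title = models.CharField(max_length=100)
    tags = models.ManyToManyField(Tag)

def create_article(validated_data: dict) -> Article:
    # Pop relation values first; they cannot be passed to the model
    # constructor and cannot be attached before the row exists.
    tags = validated_data.pop("tags", None)
    article = Article(**validated_data)
    article.save()  # the instance now has a primary key
    if tags:
        article.tags.set(tags)  # safe only after save()
    return article

A side effect worth noting: building the instance directly, instead of delegating to super().create(), is what lets the commit attach unsaved_vulnerability_ids to the object before the first save.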

dojo/importers/base_importer.py
Lines changed: 49 additions & 14 deletions

@@ -5,6 +5,7 @@
 from django.core.exceptions import ValidationError
 from django.core.files.base import ContentFile
 from django.core.files.uploadedfile import TemporaryUploadedFile
+from django.db import IntegrityError
 from django.urls import reverse
 from django.utils.timezone import make_aware

@@ -262,6 +263,13 @@ def determine_process_method(
             **kwargs,
         )

+    def determine_deduplication_algorithm(self) -> str:
+        """
+        Determines what dedupe algorithm to use for the Test being processed.
+        :return: A string representing the dedupe algorithm to use.
+        """
+        return self.test.deduplication_algorithm
+
     def update_test_meta(self):
         """
         Update the test with some values stored in the kwargs dict. The common

@@ -357,53 +365,80 @@ def update_import_history(
             commit_hash=self.commit_hash,
             type=self.import_type,
         )
-        # Define all of the respective import finding actions for the test import object
-        test_import_finding_action_list = []
+
+        # Create a history record for each finding
         for finding in closed_findings:
-            logger.debug(f"preparing Test_Import_Finding_Action for closed finding: {finding.id}")
-            test_import_finding_action_list.append(Test_Import_Finding_Action(
+            self.create_import_history_record_safe(Test_Import_Finding_Action(
                 test_import=test_import,
                 finding=finding,
                 action=IMPORT_CLOSED_FINDING,
             ))
         for finding in new_findings:
-            logger.debug(f"preparing Test_Import_Finding_Action for created finding: {finding.id}")
-            test_import_finding_action_list.append(Test_Import_Finding_Action(
+            self.create_import_history_record_safe(Test_Import_Finding_Action(
                 test_import=test_import,
                 finding=finding,
                 action=IMPORT_CREATED_FINDING,
             ))
         for finding in reactivated_findings:
-            logger.debug(f"preparing Test_Import_Finding_Action for reactivated finding: {finding.id}")
-            test_import_finding_action_list.append(Test_Import_Finding_Action(
+            self.create_import_history_record_safe(Test_Import_Finding_Action(
                 test_import=test_import,
                 finding=finding,
                 action=IMPORT_REACTIVATED_FINDING,
             ))
         for finding in untouched_findings:
-            logger.debug(f"preparing Test_Import_Finding_Action for untouched finding: {finding.id}")
-            test_import_finding_action_list.append(Test_Import_Finding_Action(
+            self.create_import_history_record_safe(Test_Import_Finding_Action(
                 test_import=test_import,
                 finding=finding,
                 action=IMPORT_UNTOUCHED_FINDING,
             ))
-        # Bulk create all the defined objects
-        Test_Import_Finding_Action.objects.bulk_create(test_import_finding_action_list)

         # Add any tags to the findings imported if necessary
         if self.apply_tags_to_findings and self.tags:
             for finding in test_import.findings_affected.all():
                 for tag in self.tags:
-                    finding.tags.add(tag)
+                    self.add_tags_safe(finding, tag)
         # Add any tags to any endpoints of the findings imported if necessary
         if self.apply_tags_to_endpoints and self.tags:
             for finding in test_import.findings_affected.all():
                 for endpoint in finding.endpoints.all():
                     for tag in self.tags:
-                        endpoint.tags.add(tag)
+                        self.add_tags_safe(endpoint, tag)

         return test_import

+    def create_import_history_record_safe(
+        self,
+        test_import_finding_action,
+    ):
+        """Creates an import history record, while catching any IntegrityErrors that might happen because of the background job having deleted a finding"""
+        logger.debug(f"creating Test_Import_Finding_Action for finding: {test_import_finding_action.finding.id} action: {test_import_finding_action.action}")
+        try:
+            test_import_finding_action.save()
+        except IntegrityError as e:
+            # This try catch makes us look we don't know what we're doing, but in https://github.com/DefectDojo/django-DefectDojo/issues/6217 we decided that for now this is the best solution
+            logger.warning("Error creating Test_Import_Finding_Action: %s", e)
+            logger.debug("Error creating Test_Import_Finding_Action, finding marked as duplicate and deleted ?")
+
+    def add_tags_safe(
+        self,
+        finding_or_endpoint,
+        tag,
+    ):
+        """Adds tags to a finding or endpoint, while catching any IntegrityErrors that might happen because of the background job having deleted a finding"""
+        if not isinstance(finding_or_endpoint, Finding) and not isinstance(finding_or_endpoint, Endpoint):
+            msg = "finding_or_endpoint must be a Finding or Endpoint object"
+            raise TypeError(msg)
+
+        msg = "finding" if isinstance(finding_or_endpoint, Finding) else "endpoint" if isinstance(finding_or_endpoint, Endpoint) else "unknown"
+        logger.debug(f" adding tag: {tag} to " + msg + f"{finding_or_endpoint.id}")
+
+        try:
+            finding_or_endpoint.tags.add(tag)
+        except IntegrityError as e:
+            # This try catch makes us look we don't know what we're doing, but in https://github.com/DefectDojo/django-DefectDojo/issues/6217 we decided that for now this is the best solution
+            logger.warning("Error adding tag: %s", e)
+            logger.debug("Error adding tag, finding marked as duplicate and deleted ?")
+
     def construct_imported_message(
         self,
         finding_count: int = 0,
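Both new helpers wrap a single write in try/except IntegrityError so that one finding deleted mid-import (for example by the asynchronous deduplication job, per issue #6217 referenced in the comments) cannot abort the whole history update; compared with the previous bulk_create, per-record saves cost more queries but isolate the failure to the one record whose finding disappeared. A minimal sketch of the pattern, with an illustrative function name:

import logging

from django.db import IntegrityError

logger = logging.getLogger(__name__)

def save_tolerating_deleted_rows(instance) -> bool:
    """Save one model instance; return False instead of raising if a
    row it references was deleted concurrently (IntegrityError)."""
    try:
        instance.save()
    except IntegrityError as exc:
        # The referenced row may have been deduplicated and deleted
        # between collection and this save; log and move on.
        logger.warning("Skipping record that references a deleted row: %s", exc)
        return False
    return True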

dojo/importers/default_importer.py
Lines changed: 33 additions & 18 deletions

@@ -254,29 +254,44 @@ def close_old_findings(
         # First check if close old findings is desired
         if not self.close_old_findings_toggle:
             return []
-        logger.debug("REIMPORT_SCAN: Closing findings no longer present in scan report")
-        # Close old active findings that are not reported by this scan.
-        # Refactoring this to only call test.finding_set.values() once.
-        findings = findings.values()
-        mitigated_hash_codes = []
+
+        logger.debug("IMPORT_SCAN: Closing findings no longer present in scan report")
+        # Remove all the findings that are coming from the report already mitigated
         new_hash_codes = []
-        for finding in findings:
-            new_hash_codes.append(finding["hash_code"])
-            if finding.get("is_mitigated", None):
-                mitigated_hash_codes.append(finding["hash_code"])
-                for hash_code in new_hash_codes:
-                    if hash_code == finding["hash_code"]:
-                        new_hash_codes.remove(hash_code)
+        new_unique_ids_from_tool = []
+        for finding in findings.values():
+            # Do not process closed findings in the report
+            if finding.get("is_mitigated", False):
+                continue
+            # Grab the hash code
+            if (hash_code := finding.get("hash_code")) is not None:
+                new_hash_codes.append(hash_code)
+            if (unique_id_from_tool := finding.get("unique_id_from_tool")) is not None:
+                new_unique_ids_from_tool.append(unique_id_from_tool)
         # Get the initial filtered list of old findings to be closed without
         # considering the scope of the product or engagement
-        old_findings = Finding.objects.exclude(
-            test=self.test,
-        ).exclude(
-            hash_code__in=new_hash_codes,
-        ).filter(
+        old_findings = Finding.objects.filter(
             test__test_type=self.test.test_type,
             active=True,
-        )
+        ).exclude(test=self.test)
+        # Filter further based on the deduplication algorithm set on the test
+        self.deduplication_algorithm = self.determine_deduplication_algorithm()
+        if self.deduplication_algorithm in ["hash_code", "legacy"]:
+            old_findings = old_findings.exclude(
+                hash_code__in=new_hash_codes,
+            )
+        if self.deduplication_algorithm == "unique_id_from_tool":
+            old_findings = old_findings.exclude(
+                unique_id_from_tool__in=new_unique_ids_from_tool,
+            )
+        if self.deduplication_algorithm == "unique_id_from_tool_or_hash_code":
+            old_findings = old_findings.exclude(
+                (Q(hash_code__isnull=False) & Q(hash_code__in=new_hash_codes))
+                | (
+                    Q(unique_id_from_tool__isnull=False)
+                    & Q(unique_id_from_tool__in=new_unique_ids_from_tool)
+                ),
+            )
         # Accommodate for product scope or engagement scope
         if self.close_old_findings_product_scope:
             old_findings = old_findings.filter(test__engagement__product=self.test.engagement.product)
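close_old_findings() now excludes already-reported findings according to the test's deduplication algorithm rather than always matching on hash_code. A standalone sketch of that selection logic (hypothetical helper name; field and algorithm names as in the diff):

from django.db.models import Q, QuerySet

def exclude_matched_findings(old_findings: QuerySet, algorithm: str,
                             new_hash_codes: list, new_unique_ids: list) -> QuerySet:
    # Findings that the new report still contains must not be closed.
    if algorithm in ("hash_code", "legacy"):
        return old_findings.exclude(hash_code__in=new_hash_codes)
    if algorithm == "unique_id_from_tool":
        return old_findings.exclude(unique_id_from_tool__in=new_unique_ids)
    if algorithm == "unique_id_from_tool_or_hash_code":
        # A finding survives only if it matches on neither identifier;
        # NULL values never count as a match.
        return old_findings.exclude(
            (Q(hash_code__isnull=False) & Q(hash_code__in=new_hash_codes))
            | (Q(unique_id_from_tool__isnull=False)
               & Q(unique_id_from_tool__in=new_unique_ids)),
        )
    return old_findings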

dojo/importers/default_reimporter.py
Lines changed: 0 additions & 7 deletions

@@ -147,13 +147,6 @@ def process_scan(
             test_import_history,
         )

-    def determine_deduplication_algorithm(self) -> str:
-        """
-        Determines what dedupe algorithm to use for the Test being processed.
-        :return: A string representing the dedupe algorithm to use.
-        """
-        return self.test.deduplication_algorithm
-
     def process_findings(
         self,
         parsed_findings: list[Finding],

dojo/product/views.py
Lines changed: 1 addition & 0 deletions

@@ -1381,6 +1381,7 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict):
         finding.reporter = request.user
         finding.numerical_severity = Finding.get_numerical_severity(finding.severity)
         finding.tags = context["form"].cleaned_data["tags"]
+        finding.unsaved_vulnerability_ids = context["form"].cleaned_data["vulnerability_ids"].split()
         finding.save()
         # Save and add new endpoints
         finding_helper.add_endpoints(finding, context["form"])
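The added line (mirrored in dojo/test/views.py below) relies on str.split() with no separator, which splits on any run of whitespace, including newlines, and discards empty strings, so a form field holding one vulnerability ID per line parses cleanly:

# Illustrative values only; the form field itself is free-form text.
raw = "CVE-2024-0001\nCVE-2024-0002\n\n"
assert raw.split() == ["CVE-2024-0001", "CVE-2024-0002"]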

dojo/templates/base.html
Lines changed: 1 addition & 1 deletion

@@ -126,7 +126,7 @@
         <div class="custom-search-form">
             <form id="custom_search_form" role="search" method="get" action="{% url 'simple_search' %}">
                 <div class="input-group">
-                    <input id="simple_search" label="simple_search" aria_label="simple_search" type="text" name="query" class="form-control"
+                    <input id="simple_search" label="simple_search" aria-label="simple_search" type="text" name="query" class="form-control"
                         placeholder="{% trans "Search" %}..." value="{{clean_query}}">
                     <span class="input-group-btn">
                         <button id="simple_search_submit" class="btn btn-primary" type="submit" aria-label="Search">

dojo/test/views.py
Lines changed: 1 addition & 0 deletions

@@ -538,6 +538,7 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict):
         finding.reporter = request.user
         finding.numerical_severity = Finding.get_numerical_severity(finding.severity)
         finding.tags = context["form"].cleaned_data["tags"]
+        finding.unsaved_vulnerability_ids = context["form"].cleaned_data["vulnerability_ids"].split()
         finding.save()
         # Save and add new endpoints
         finding_helper.add_endpoints(finding, context["form"])

helm/defectdojo/Chart.yaml
Lines changed: 2 additions & 2 deletions

@@ -1,8 +1,8 @@
 apiVersion: v2
-appVersion: "2.43.2"
+appVersion: "2.43.3"
 description: A Helm chart for Kubernetes to install DefectDojo
 name: defectdojo
-version: 1.6.173
+version: 1.6.174
 icon: https://www.defectdojo.org/img/favicon.ico
 maintainers:
 - name: madchap

requirements.txt
Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@ Markdown==3.7
 openpyxl==3.1.5
 Pillow==11.1.0  # required by django-imagekit
 psycopg[c]==3.2.4
-cryptography==44.0.0
+cryptography==44.0.1
 python-dateutil==2.9.0.post0
 pytz==2025.1
 redis==5.2.1
unittests/scans/semgrep/close_old_findings_report_line31.json (filename inferred from the unit test below)
Lines changed: 193 additions & 0 deletions

New file; all 193 lines are additions, shown here as plain content:

{
  "version": "1.97.0",
  "results": [
    {
      "check_id": "python.lang.security.audit.dangerous-subprocess-use-tainted-env-args.dangerous-subprocess-use-tainted-env-args",
      "path": "sample/brute.py",
      "start": { "line": 31, "col": 29, "offset": 285 },
      "end": { "line": 31, "col": 58, "offset": 314 },
      "extra": {
        "metavars": {
          "$FUNC": {
            "start": { "line": 31, "col": 25, "offset": 281 },
            "end": { "line": 31, "col": 28, "offset": 284 },
            "abstract_content": "run"
          },
          "$CMD": {
            "start": { "line": 31, "col": 29, "offset": 285 },
            "end": { "line": 31, "col": 58, "offset": 314 },
            "abstract_content": "[program username password]"
          }
        },
        "message": "Detected subprocess function 'run' with user controlled data. A malicious actor could leverage this to perform command injection. You may consider using 'shlex.quote()'.",
        "metadata": {
          "owasp": ["A01:2017 - Injection", "A03:2021 - Injection"],
          "cwe": ["CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"],
          "asvs": {
            "control_id": "5.3.8 OS Command Injection",
            "control_url": "https://github.com/OWASP/ASVS/blob/master/4.0/en/0x13-V5-Validation-Sanitization-Encoding.md#v53-output-encoding-and-injection-prevention-requirements",
            "section": "V5: Validation, Sanitization and Encoding Verification Requirements",
            "version": "4"
          },
          "references": [
            "https://stackoverflow.com/questions/3172470/actual-meaning-of-shell-true-in-subprocess",
            "https://docs.python.org/3/library/subprocess.html",
            "https://docs.python.org/3/library/shlex.html",
            "https://semgrep.dev/docs/cheat-sheets/python-command-injection/"
          ],
          "category": "security",
          "technology": ["python"],
          "confidence": "MEDIUM",
          "cwe2022-top25": true,
          "cwe2021-top25": true,
          "subcategory": ["vuln"],
          "likelihood": "MEDIUM",
          "impact": "MEDIUM",
          "license": "Commons Clause License Condition v1.0[LGPL-2.1-only]",
          "vulnerability_class": ["Command Injection"],
          "source": "https://semgrep.dev/r/python.lang.security.audit.dangerous-subprocess-use-tainted-env-args.dangerous-subprocess-use-tainted-env-args",
          "shortlink": "https://sg.run/pLGg",
          "semgrep.dev": {
            "rule": {
              "origin": "community",
              "r_id": 27262,
              "rule_id": "AbUgrZ",
              "rv_id": 928299,
              "url": "https://semgrep.dev/playground/r/0bTpAQn/python.lang.security.audit.dangerous-subprocess-use-tainted-env-args.dangerous-subprocess-use-tainted-env-args",
              "version_id": "0bTpAQn"
            }
          }
        },
        "severity": "ERROR",
        "fingerprint": "1d0cfd65fc17a97773e848ec31ddd8a580c5fb54914f03bebe4f934a16d5d45f22cfd3ab1b9f3ab9871da06569701e38ea2b518d2a1aa397cd5d7ecbd699d4a4_0",
        "lines": " result = subprocess.run([program, username, password], stdout=subprocess.DEVNULL)",
        "is_ignored": false,
        "dataflow_trace": {
          "taint_source": [
            "CliLoc",
            [
              {
                "path": "sample/brute.py",
                "start": { "line": 6, "col": 11, "offset": 64 },
                "end": { "line": 6, "col": 19, "offset": 72 }
              },
              "sys.argv"
            ]
          ],
          "intermediate_vars": [
            {
              "location": {
                "path": "sample/brute.py",
                "start": { "line": 6, "col": 1, "offset": 54 },
                "end": { "line": 6, "col": 8, "offset": 61 }
              },
              "content": "program"
            },
            {
              "location": {
                "path": "sample/brute.py",
                "start": { "line": 6, "col": 1, "offset": 54 },
                "end": { "line": 6, "col": 8, "offset": 61 }
              },
              "content": "program"
            }
          ],
          "taint_sink": [
            "CliLoc",
            [
              {
                "path": "sample/brute.py",
                "start": { "line": 31, "col": 29, "offset": 285 },
                "end": { "line": 31, "col": 58, "offset": 314 }
              },
              "[program, username, password]"
            ]
          ]
        },
        "engine_kind": "OSS",
        "validation_state": "NO_VALIDATOR"
      }
    }
  ],
  "errors": [],
  "paths": {
    "scanned": [
      "README.md",
      "sample/brute.py",
      "sample/findmysecrets.go",
      "sample/function.go",
      "sample/go.mod",
      "sample/session.go",
      "sample/sqli.go"
    ]
  },
  "interfile_languages_used": [],
  "skipped_rules": []
}
unittests/scans/semgrep/close_old_findings_report_second_run_line24.json (filename inferred from the unit test below)
Lines changed: 193 additions & 0 deletions

New file, identical to close_old_findings_report_line31.json above except that every location that pointed at line 31 now points at line 24 (the finding's start/end, the $FUNC and $CMD metavars, and the taint sink; offsets shift accordingly to 274-277 and 278-307). The fingerprint and all other fields are unchanged, so this report describes the same finding at a different line.
unittests/scans/semgrep/close_old_findings_report_third_run_different_unique_id.json (filename inferred from the unit test below)
Lines changed: 193 additions & 0 deletions

New file, identical to the line-24 report above except that the fingerprint is prefixed with "DIFFERENT" ("DIFFERENT1d0cfd65fc17a97773e848ec31ddd8a580c5fb54914f03bebe4f934a16d5d45f22cfd3ab1b9f3ab9871da06569701e38ea2b518d2a1aa397cd5d7ecbd699d4a4_0"), giving the finding a different tool-reported identity while the location data stays the same.

unittests/test_importers_closeold.py
Lines changed: 57 additions & 0 deletions

@@ -114,3 +114,60 @@ def test_close_old_same_product_scan(self):
         # Dedupe is off, and close old findings does not close old findings if they are the same finding.
         # If this behavior changes, or dedupe is on, the number of closed findings will be 4
         self.assertEqual(8, len_closed_findings)
+
+    def test_close_old_same_product_scan_matching_with_unique_id_from_tool(self):
+        scan_type = "Semgrep JSON Report"
+        user, _ = User.objects.get_or_create(username="admin")
+        product_type, _ = Product_Type.objects.get_or_create(name="test2")
+        product, _ = Product.objects.get_or_create(
+            name="TestDojoCloseOldImporter3",
+            prod_type=product_type,
+        )
+        engagement1, _ = Engagement.objects.get_or_create(
+            name="Close Old Same Product 1",
+            product=product,
+            target_start=timezone.now(),
+            target_end=timezone.now(),
+        )
+        engagement2, _ = Engagement.objects.get_or_create(
+            name="Close Old Same Product 2",
+            product=product,
+            target_start=timezone.now(),
+            target_end=timezone.now(),
+        )
+        engagement3, _ = Engagement.objects.get_or_create(
+            name="Close Old Same Product 3",
+            product=product,
+            target_start=timezone.now(),
+            target_end=timezone.now(),
+        )
+        environment, _ = Development_Environment.objects.get_or_create(name="Development")
+        import_options = {
+            "user": user,
+            "lead": user,
+            "scan_date": None,
+            "environment": environment,
+            "active": True,
+            "verified": False,
+            "close_old_findings_product_scope": True,
+            "scan_type": scan_type,
+        }
+        # Import first test
+        with open(get_unit_tests_scans_path("semgrep") / "close_old_findings_report_line31.json", "r+", encoding="utf-8") as many_findings_scan:
+            importer = DefaultImporter(engagement=engagement1, close_old_findings=False, **import_options)
+            _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
+            self.assertEqual(1, len_new_findings)
+            self.assertEqual(0, len_closed_findings)
+        # Import separate report with different line number. Before this change, the legacy dedupe algorithm would calculate a different
+        # hash code and close of the findings. Now that we are matching on unique ID from tool, we should no close anything, and create one
+        with open(get_unit_tests_scans_path("semgrep") / "close_old_findings_report_second_run_line24.json", "r+", encoding="utf-8") as many_findings_scan:
+            importer = DefaultImporter(engagement=engagement2, close_old_findings=True, **import_options)
+            _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
+            self.assertEqual(1, len_new_findings)
+            self.assertEqual(0, len_closed_findings)
+        # This scan has a different unique ID from tool, so we should have one new finding, and one closed finding
+        with open(get_unit_tests_scans_path("semgrep") / "close_old_findings_report_third_run_different_unique_id.json", "r+", encoding="utf-8") as many_findings_scan:
+            importer = DefaultImporter(engagement=engagement3, close_old_findings=True, **import_options)
+            _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
+            self.assertEqual(1, len_new_findings)
+            self.assertEqual(1, len_closed_findings)
