mirror of https://github.com/OWASP/Nettacker.git
Compare commits
25 Commits
f977c84e9e ... 250d8f39ad
| Author | SHA1 | Date |
|---|---|---|
| | 250d8f39ad | |
| | 65bf88e68f | |
| | 53bc2680a5 | |
| | 5c7c1fb539 | |
| | 5a02e5f70b | |
| | 6d91f9d9a6 | |
| | 7a12ac69be | |
| | 92e04045c7 | |
| | 435d3f53bc | |
| | 6e03522016 | |
| | cb79437b50 | |
| | cb601af90b | |
| | a92c6fd0f4 | |
| | 776d6e7131 | |
| | 4cd2bc1ad8 | |
| | c8dabbbe47 | |
| | 6979f79a39 | |
| | d7c7fd473b | |
| | 746fde3aa1 | |
| | bddf71bc5e | |
| | b9b908dadb | |
| | fbfbceeba0 | |
| | 5d7e11d512 | |
| | 4151331ba1 | |
| | d08f4a4274 | |
@@ -646,58 +646,70 @@ python nettacker.py --start-api --api-access-log --api-port 8080 --api-debug-mod

# Database

OWASP Nettacker currently supports three databases (previously two):

- SQLite
- MySQL
- PostgreSQL

The default database is SQLite. You can, however, configure the database to your liking.

## SQLite configuration

Previously, the SQLite database was configured in the `core/config.py` file under the `_database_config()` function, for example:

```
return {
    "DB": "sqlite",
    "DATABASE": _paths()["home_path"] + "/nettacker.db",  # This is the location of your db
    "USERNAME": "",
    "PASSWORD": "",
    "HOST": "",
    "PORT": ""
}
```

The settings below are for a SQLite wrapper called **APSW** (Another Python SQLite Wrapper). They can now be found in the `nettacker/config.py` file under the `DbConfig` class:

```
engine = "sqlite"
name = str(CWD / ".nettacker/data/nettacker.db")
host = ""
port = ""
username = ""
password = ""
ssl_mode = "disable"
journal_mode = "WAL"
synchronous_mode = "NORMAL"
```

These are the default and recommended settings; feel free to change them to suit your needs. To use the SQLite database, make sure `engine` is set to `sqlite` and `name` is the path to your database. The `journal_mode` and `synchronous_mode` values are chosen to be optimal for multithreaded I/O.

> Note: You can choose to use the lite SQLite wrapper APSW by setting the `use_apsw_for_sqlite` parameter inside the config to `True` for performance enhancements, as sketched below.
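For reference, a minimal sketch of what the APSW code path amounts to once `use_apsw_for_sqlite` (from `DefaultSettings` in `nettacker/config.py`, shown later in this diff) is enabled. The exact wiring lives in `nettacker/database/db.py`; this snippet is illustrative only, and the database path is the default shown above:

```python
import apsw  # Another Python SQLite Wrapper

# Illustrative only: open the database the way the APSW path in
# nettacker/database/db.py does, applying the configured PRAGMAs.
connection = apsw.Connection(".nettacker/data/nettacker.db")
connection.setbusytimeout(3000)  # milliseconds; Nettacker derives this from its timeout setting
cursor = connection.cursor()
cursor.execute("PRAGMA journal_mode=WAL")     # journal_mode from DbConfig
cursor.execute("PRAGMA synchronous=NORMAL")   # synchronous_mode from DbConfig
```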
## MySQL configuration

Previously, the MySQL database was configured in the `core/config.py` file under the `_database_config()` function, for example:

```
return {
    "DB": "mysql",
    "DATABASE": "nettacker",  # This is the name of your db
    "USERNAME": "username",
    "PASSWORD": "password",
    "HOST": "localhost or some other host",
    "PORT": "3306 or some other custom port"
}
```

The MySQL database can now be configured in the `nettacker/config.py` file under the `DbConfig` class. Here is a sample configuration:

```
engine = "mysql"
name = "nettacker"
host = "localhost"
port = 3306
username = "root"
password = "some-password"
ssl_mode = "disable"
journal_mode = "WAL"
synchronous_mode = "NORMAL"
```

After this configuration:

1. Open the MySQL configuration file (`/etc/mysql/my.cnf` on Linux) as a sudo user.
2. Add this to the end of the file:

```
[mysqld]
sql_mode = "STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
```

3. Restart MySQL.

Only the relevant fields are considered, so you don't need to update, change, or remove the irrelevant ones (`ssl_mode`, `journal_mode` and `synchronous_mode` are not relevant in this case). A sketch of the resulting connection URL follows below.
## Postgres configuration

Previously, the Postgres database was configured in the `core/config.py` file under the `_database_config()` function, for example:

```
return {
    "DB": "postgres",
    "DATABASE": "nettacker",  # Name of db
    "USERNAME": "username",
    "PASSWORD": "password",
    "HOST": "localhost or some other host",
    "PORT": "5432 or some other custom port"
}
```

With that old configuration you also had to comment out the line `connect_args={'check_same_thread': False}` in `database/db.py`.

The Postgres database can now be configured in the `nettacker/config.py` file under the `DbConfig` class. Here is a sample configuration:

```
engine = "postgres"
name = "nettacker"
host = "localhost"
port = 5432
username = "root"
password = "some-password"
ssl_mode = "disable"
journal_mode = "WAL"
synchronous_mode = "NORMAL"
```

In this case the irrelevant fields are `journal_mode` and `synchronous_mode`; you don't have to update, change, or remove them.

**Note**: If you want encryption, set `ssl_mode` to `require`, as sketched below.
@@ -82,7 +82,11 @@ class DbConfig(ConfigBase):
    For sqlite database:
        fill the name of the DB as sqlite,
        DATABASE as the name of the db user wants
        other details can be left empty
        Set the journal_mode (default="WAL") and
        synchronous_mode (default="NORMAL"). Rest
        of the fields can be left empty
        This is the default database:
        str(CWD / ".nettacker/data/nettacker.db")
    For mysql users:
        fill the ENGINE name of the DB as mysql
        NAME as the name of the database you want to create
@@ -104,6 +108,8 @@ class DbConfig(ConfigBase):
    username = ""
    password = ""
    ssl_mode = "disable"
    journal_mode = "WAL"
    synchronous_mode = "NORMAL"


class PathConfig:
@@ -142,6 +148,9 @@ class DefaultSettings(ConfigBase):
    parallel_module_scan = 1
    passwords = None
    passwords_list = None
    use_apsw_for_sqlite = (
        False  # Setting to toggle between APSW and SQLAlchemy for sqlite databases
    )
    ping_before_scan = False
    ports = None
    profiles = None
@@ -151,6 +160,8 @@ class DefaultSettings(ConfigBase):
        random_chars=generate_random_token(10),
    )
    retries = 1
    max_retries = 3
    retry_delay = 0.1
    scan_ip_range = False
    scan_subdomains = False
    selected_modules = None
@@ -158,9 +158,7 @@ class Nettacker(ArgParser):

        for target in copy.deepcopy(self.arguments.targets):
            for row in find_events(target, "subdomain_scan", scan_id):
                for sub_domain in json.loads(row.json_event)["response"]["conditions_results"][
                    "content"
                ]:
                for sub_domain in json.loads(row)["response"]["conditions_results"]["content"]:
                    if sub_domain not in self.arguments.targets:
                        self.arguments.targets.append(sub_domain)
        # icmp_scan
@@ -86,7 +86,7 @@ def build_text_table(events):
    table_headers = ["date", "target", "module_name", "port", "logs"]
    _table.add_rows([table_headers])
    for event in events:
        log = merge_logs_to_list(json.loads(event["json_event"]), [])
        log = merge_logs_to_list(event, [])
        _table.add_rows(
            [
                table_headers,
@@ -252,7 +252,7 @@ def create_report(options, scan_id):
    )
    index = 1
    for event in all_scan_logs:
        log_list = merge_logs_to_list(json.loads(event["json_event"]), [])
        log_list = merge_logs_to_list(event, [])
        html_table_content += log_data.table_items.format(
            event["date"],
            event["target"],
@@ -260,7 +260,7 @@ def create_report(options, scan_id):
            event["port"],
            "<br>".join(log_list) if log_list else "Detected",  # event["event"], #log
            index,
            html.escape(event["json_event"]),
            html.escape(json.dumps(event)),
        )
        index += 1
        html_table_content += (
@@ -52,7 +52,7 @@ class BaseEngine(ABC):
        while True:
            event = find_temp_events(target, module_name, scan_id, event_name)
            if event:
                events.append(json.loads(event.event)["response"]["conditions_results"])
                events.append(json.loads(event)["response"]["conditions_results"])
                break
            time.sleep(0.1)
        return events
@@ -78,7 +78,7 @@ class Module:
        if not self.skip_service_discovery and self.module_name not in self.ignored_core_modules:
            services = {}
            for service in find_events(self.target, "port_scan", self.scan_id):
                service_event = json.loads(service.json_event)
                service_event = json.loads(service)
                port = service_event["port"]
                protocols = service_event["response"]["conditions_results"].keys()
                for protocol in protocols:
@@ -3,6 +3,7 @@ import ctypes
import datetime
import hashlib
import importlib
import json
import math
import multiprocessing
import random
@@ -32,6 +33,10 @@ def replace_dependent_response(log, response_dependent):

def merge_logs_to_list(result, log_list=[]):
    if isinstance(result, dict):
        # Doesn't hurt normal operations
        if "json_event" in list(result.keys()):
            if not isinstance(result["json_event"], dict):
                result["json_event"] = json.loads(result["json_event"])
        for i in result:
            if "log" == i:
                log_list.append(result["log"])
@@ -1,6 +1,11 @@
import json
import time

try:
    import apsw
except ImportError:
    apsw = None

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@@ -11,7 +16,7 @@ from nettacker.core.messages import messages
from nettacker.database.models import HostsLog, Report, TempEvents

config = Config()
log = logger.get_logger()
logger = logger.get_logger()


def db_inputs(connection_type):
@@ -39,9 +44,31 @@ def create_connection():
    """
    a function to create connections to db with pessimistic approach

    Returns:
        connection if success otherwise False
        For sqlite, it creates and returns a sqlite connection object
        for mysql and postgresql, it returns the connection or False if
        connection failed.
    """
    if Config.db.engine.startswith("sqlite") and Config.settings.use_apsw_for_sqlite:
        if apsw is None:
            raise ImportError("APSW is required for SQLite backend.")
        # In case of sqlite, the name parameter is the database path

        try:
            DB_PATH = config.db.as_dict()["name"]
            connection = apsw.Connection(DB_PATH)
            connection.setbusytimeout(int(config.settings.timeout) * 100)
            cursor = connection.cursor()

            # Performance enhancing configuration; WAL helps with concurrency.
            cursor.execute(f"PRAGMA journal_mode={Config.db.journal_mode}")
            cursor.execute(f"PRAGMA synchronous={Config.db.synchronous_mode}")

            return connection, cursor
        except Exception as e:
            logger.error(f"Failed to create APSW connection: {e}")
            raise

    else:
        connection_args = {}

        if Config.db.engine.startswith("sqlite"):
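Since `create_connection()` now returns an `(apsw.Connection, cursor)` tuple on the APSW path and a SQLAlchemy session otherwise, callers throughout this diff branch on the return type. A minimal sketch of that calling convention (illustrative only, assuming the module-level imports of `nettacker/database/db.py`, not the exact Nettacker code):

```python
def example_caller():
    session = create_connection()
    if isinstance(session, tuple):
        # APSW path: raw connection + cursor, hand-written parameterised SQL.
        connection, cursor = session
        try:
            cursor.execute("SELECT COUNT(*) FROM scan_events")
            return cursor.fetchone()[0]
        finally:
            cursor.close()
            connection.close()
    else:
        # SQLAlchemy path: ORM session and model classes.
        return session.query(HostsLog).count()
```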
@@ -70,6 +97,21 @@ def send_submit_query(session):
    Returns:
        True if submitted success otherwise False
    """
    if isinstance(session, tuple):
        connection, cursor = session
        for _ in range(100):
            try:
                connection.execute("COMMIT")
                return True
            except Exception:
                connection.execute("ROLLBACK")
                time.sleep(0.1)
            finally:
                connection.close()
        connection.close()
        logger.warn(messages("database_connect_fail"))
        return False
    else:
        try:
            for _ in range(1, 100):
                try:
@@ -77,8 +119,10 @@ def send_submit_query(session):
                    return True
                except Exception:
                    time.sleep(0.1)
            logger.warn(messages("database_connect_fail"))
            return False
        except Exception:
            log.warn(messages("database_connect_fail"))
            logger.warn(messages("database_connect_fail"))
            return False
    return False
@@ -94,8 +138,35 @@ def submit_report_to_db(event):
    Returns:
        return True if submitted otherwise False
    """
    log.verbose_info(messages("inserting_report_db"))
    logger.verbose_info(messages("inserting_report_db"))
    session = create_connection()

    if isinstance(session, tuple):
        connection, cursor = session

        try:
            cursor.execute("BEGIN")
            cursor.execute(
                """
                INSERT INTO reports (date, scan_unique_id, report_path_filename, options)
                VALUES (?, ?, ?, ?)
                """,
                (
                    str(event["date"]),
                    event["scan_id"],
                    event["options"]["report_path_filename"],
                    json.dumps(event["options"]),
                ),
            )
            return send_submit_query(session)
        except Exception:
            cursor.execute("ROLLBACK")
            logger.warn("Could not insert report...")
            return False
        finally:
            cursor.close()
            connection.close()
    else:
        session.add(
            Report(
                date=event["date"],
@@ -119,6 +190,35 @@ def remove_old_logs(options):
        True if success otherwise False
    """
    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session

        try:
            cursor.execute("BEGIN")
            cursor.execute(
                """
                DELETE FROM scan_events
                WHERE target = ?
                    AND module_name = ?
                    AND scan_unique_id != ?
                    AND scan_unique_id != ?
                """,
                (
                    options["target"],
                    options["module_name"],
                    options["scan_id"],
                    options["scan_compare_id"],
                ),
            )
            return send_submit_query(session)
        except Exception:
            cursor.execute("ROLLBACK")
            logger.warn("Could not remove old logs...")
            return False
        finally:
            cursor.close()
            connection.close()
    else:
        session.query(HostsLog).filter(
            HostsLog.target == options["target"],
            HostsLog.module_name == options["module_name"],
@@ -131,7 +231,9 @@ def remove_old_logs(options):

def submit_logs_to_db(log):
    """
    this function created to submit new events into database
    this function created to submit new events into database.
    This requires a little more robust handling in case of
    APSW in order to avoid database lock issues.

    Args:
        log: log event in JSON type
@@ -139,8 +241,61 @@ def submit_logs_to_db(log):
    Returns:
        True if success otherwise False
    """
    if isinstance(log, dict):
        session = create_connection()
        if isinstance(session, tuple):
            connection, cursor = session
            try:
                for _ in range(Config.settings.max_retries):
                    try:
                        if not connection.in_transaction:
                            connection.execute("BEGIN")
                        cursor.execute(
                            """
                            INSERT INTO scan_events (target, date, module_name, scan_unique_id, port, event, json_event)
                            VALUES (?, ?, ?, ?, ?, ?, ?)
                            """,
                            (
                                log["target"],
                                str(log["date"]),
                                log["module_name"],
                                log["scan_id"],
                                json.dumps(log["port"]),
                                json.dumps(log["event"]),
                                json.dumps(log["json_event"]),
                            ),
                        )
                        return send_submit_query(session)

                    except apsw.BusyError as e:
                        if "database is locked" in str(e).lower():
                            logger.warn(
                                f"[Retry {_ + 1}/{Config.settings.max_retries}] Database is locked. Retrying..."
                            )
                            if connection.in_transaction:
                                connection.execute("ROLLBACK")
                            time.sleep(Config.settings.retry_delay)
                            continue
                        else:
                            if connection.in_transaction:
                                connection.execute("ROLLBACK")
                            return False
                    except Exception:
                        try:
                            if connection.in_transaction:
                                connection.execute("ROLLBACK")
                        except Exception:
                            pass
                        return False
                # All retries exhausted but we want to continue operation
                logger.warn("All retries exhausted. Skipping this log.")
                return True
            finally:
                cursor.close()
                connection.close()

        else:
            session.add(
                HostsLog(
                    target=log["target"],
@@ -154,13 +309,15 @@ def submit_logs_to_db(log):
                )
            )
        return send_submit_query(session)
    else:
        log.warn(messages("invalid_json_type_to_db").format(log))
        logger.warn(messages("invalid_json_type_to_db").format(log))
        return False


def submit_temp_logs_to_db(log):
    """
    this function created to submit new events into database
    this function created to submit new events into database.
    This requires a little more robust handling in case of
    APSW in order to avoid database lock issues.

    Args:
        log: log event in JSON type
@@ -170,6 +327,64 @@ def submit_temp_logs_to_db(log):
    """
    if isinstance(log, dict):
        session = create_connection()
        if isinstance(session, tuple):
            connection, cursor = session

            try:
                for _ in range(Config.settings.max_retries):
                    try:
                        if not connection.in_transaction:
                            cursor.execute("BEGIN")
                        cursor.execute(
                            """
                            INSERT INTO temp_events (target, date, module_name, scan_unique_id, event_name, port, event, data)
                            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                            """,
                            (
                                log["target"],
                                str(log["date"]),
                                log["module_name"],
                                log["scan_id"],
                                log["event_name"],
                                json.dumps(log["port"]),
                                json.dumps(log["event"]),
                                json.dumps(log["data"]),
                            ),
                        )
                        return send_submit_query(session)
                    except apsw.BusyError as e:
                        if "database is locked" in str(e).lower():
                            logger.warn(
                                f"[Retry {_ + 1}/{Config.settings.max_retries}] Database is locked. Retrying..."
                            )
                            try:
                                if connection.in_transaction:
                                    connection.execute("ROLLBACK")
                            except Exception:
                                pass
                            time.sleep(Config.settings.retry_delay)
                            continue
                        else:
                            try:
                                if connection.in_transaction:
                                    connection.execute("ROLLBACK")
                            except Exception:
                                pass
                            return False
                    except Exception:
                        try:
                            if connection.in_transaction:
                                connection.execute("ROLLBACK")
                        except Exception:
                            pass
                        return False
                # All retries exhausted but we want to continue operation
                logger.warn("All retries exhausted. Skipping this log.")
                return True
            finally:
                cursor.close()
                connection.close()
        else:
            session.add(
                TempEvents(
                    target=log["target"],
@@ -184,7 +399,7 @@ def submit_temp_logs_to_db(log):
                )
            )
        return send_submit_query(session)
    else:
        log.warn(messages("invalid_json_type_to_db").format(log))
        logger.warn(messages("invalid_json_type_to_db").format(log))
        return False

@@ -202,10 +417,31 @@ def find_temp_events(target, module_name, scan_id, event_name):
        an array with JSON events or an empty array
    """
    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session
        try:
            for _ in range(1, 100):
                try:
                    return (
            cursor.execute(
                """
                SELECT event
                FROM temp_events
                WHERE target = ? AND module_name = ? AND scan_unique_id = ? AND event_name = ?
                LIMIT 1
                """,
                (target, module_name, scan_id, event_name),
            )

            row = cursor.fetchone()
            cursor.close()
            connection.close()
            if row:
                return row[0]
            return []
        except Exception:
            logger.warn(messages("database_connect_fail"))
            return []
        return []
    else:
        result = (
            session.query(TempEvents)
            .filter(
                TempEvents.target == target,
@@ -215,12 +451,8 @@ def find_temp_events(target, module_name, scan_id, event_name):
            )
            .first()
        )
        except Exception:
            time.sleep(0.1)
        except Exception:
            log.warn(messages("database_connect_fail"))
            return False
        return False

        return result.event if result else []


def find_events(target, module_name, scan_id):
@@ -236,15 +468,38 @@ def find_events(target, module_name, scan_id):
        an array with JSON events or an empty array
    """
    session = create_connection()
    return (
        session.query(HostsLog)
    if isinstance(session, tuple):
        connection, cursor = session

        try:
            cursor.execute(
                """
                SELECT json_event FROM scan_events
                WHERE target = ? AND module_name = ? and scan_unique_id = ?
                """,
                (target, module_name, scan_id),
            )

            rows = cursor.fetchall()
            cursor.close()
            connection.close()
            if rows:
                return [json.dumps((json.loads(row[0]))) for row in rows]
            return []
        except Exception:
            logger.warn("Database query failed...")
            return []
    else:
        return [
            row.json_event
            for row in session.query(HostsLog)
            .filter(
                HostsLog.target == target,
                HostsLog.module_name == module_name,
                HostsLog.scan_unique_id == scan_id,
            )
            .all()
        ]


def select_reports(page):
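With this change `find_events()` returns a list of raw JSON strings on both backends instead of ORM rows, which is why callers elsewhere in this diff (the subdomain-scan loop and the service-discovery code) switched from `row.json_event` to `json.loads(row)`. A small illustrative sketch of the new calling pattern; the target and scan id are placeholders, and `find_events` is assumed imported from `nettacker.database.db`:

```python
import json

# Illustrative only: consume find_events() results as JSON strings.
for row in find_events("192.168.1.1", "port_scan", "example-scan-id"):
    event = json.loads(row)
    print(event["port"], list(event["response"]["conditions_results"].keys()))
```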
@@ -261,6 +516,40 @@ def select_reports(page):
    """
    selected = []
    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session
        offset = (page - 1) * 10

        try:
            cursor.execute(
                """
                SELECT id, date, scan_unique_id, report_path_filename, options
                FROM reports
                ORDER BY id DESC
                LIMIT 10 OFFSET ?
                """,
                (offset,),
            )

            rows = cursor.fetchall()

            cursor.close()
            connection.close()
            for row in rows:
                tmp = {
                    "id": row[0],
                    "date": str(row[1]),
                    "scan_id": row[2],
                    "report_path_filename": row[3],
                    "options": json.loads(row[4]),
                }
                selected.append(tmp)
            return selected

        except Exception:
            logger.warn("Could not retrieve report...")
            return structure(status="error", msg="database error!")
    else:
        try:
            search_data = (
                session.query(Report).order_by(Report.id.desc()).offset((page * 10) - 10).limit(10)
@@ -290,9 +579,38 @@ def get_scan_result(id):
        result file content (TEXT, HTML, JSON) if success otherwise an error in JSON type.
    """
    session = create_connection()
    filename = session.query(Report).filter_by(id=id).first().report_path_filename
    if isinstance(session, tuple):
        connection, cursor = session
        cursor.execute(
            """
            SELECT report_path_filename from reports
            WHERE id = ?
            """,
            (id,),
        )

        row = cursor.fetchone()
        cursor.close()
        connection.close()
        if row:
            filename = row[0]
            try:
                return filename, open(str(filename), "rb").read()
            except IOError as e:
                logger.error(f"Failed to read report file: {e}")
                return None
        else:
            return structure(status="error", msg="database error!")
    else:
        report = session.query(Report).filter_by(id=id).first()
        if not report:
            return None

        try:
            return report.report_path_filename, open(str(report.report_path_filename), "rb").read()
        except IOError as e:
            logger.error(f"Failed to read report file: {e}")
            return None


def last_host_logs(page):
@@ -307,6 +625,78 @@ def last_host_logs(page):
        an array of events in JSON type if success otherwise an error in JSON type
    """
    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session
        try:
            cursor.execute(
                """
                SELECT DISTINCT target
                FROM scan_events
                ORDER BY id DESC
                LIMIT 10 OFFSET ?
                """,
                [(page - 1) * 10],
            )
            targets = cursor.fetchall()

            if not targets:
                return structure(status="finished", msg="No more search results")

            hosts = []

            for (target,) in targets:
                cursor.execute(
                    """
                    SELECT DISTINCT module_name
                    FROM scan_events
                    WHERE target = ?
                    """,
                    [target],
                )
                module_names = [row[0] for row in cursor.fetchall()]

                cursor.execute(
                    """
                    SELECT date
                    FROM scan_events
                    WHERE target = ?
                    ORDER BY id DESC
                    LIMIT 1
                    """,
                    [target],
                )
                latest_date = cursor.fetchone()
                latest_date = latest_date[0] if latest_date else None

                cursor.execute(
                    """
                    SELECT event
                    FROM scan_events
                    WHERE target = ?
                    """,
                    [target],
                )
                events = [row[0] for row in cursor.fetchall()]

                hosts.append(
                    {
                        "target": target,
                        "info": {
                            "module_name": module_names,
                            "date": latest_date,
                            "events": events,
                        },
                    }
                )
            cursor.close()
            connection.close()
            return hosts

        except Exception:
            logger.warn("Database query failed...")
            return structure(status="error", msg="Database error!")

    else:
        hosts = [
            {
                "target": host.target,
@@ -330,7 +720,9 @@ def last_host_logs(page):
                    # ],
                    "events": [
                        _.event
                        for _ in session.query(HostsLog).filter(HostsLog.target == host.target).all()
                        for _ in session.query(HostsLog)
                        .filter(HostsLog.target == host.target)
                        .all()
                    ],
                },
            }
@@ -356,6 +748,36 @@ def get_logs_by_scan_id(scan_id):
        an array with JSON events or an empty array
    """
    session = create_connection()

    if isinstance(session, tuple):
        connection, cursor = session

        cursor.execute(
            """
            SELECT scan_unique_id, target, module_name, date, port, event, json_event
            from scan_events
            WHERE scan_unique_id = ?
            """,
            (scan_id,),  # We have to put this as an indexed element
        )
        rows = cursor.fetchall()

        cursor.close()
        connection.close()
        return [
            {
                "scan_id": row[0],
                "target": row[1],
                "module_name": row[2],
                "date": str(row[3]),
                "port": json.loads(row[4]),
                "event": json.loads(row[5]),
                "json_event": json.loads(row[6]) if row[6] else {},
            }
            for row in rows
        ]

    else:
        return [
            {
                "scan_id": scan_id,
@@ -379,6 +801,23 @@ def get_options_by_scan_id(scan_id):
        an array with a dict with stored options or an empty array
    """
    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session

        cursor.execute(
            """
            SELECT options from reports
            WHERE scan_unique_id = ?
            """,
            (scan_id,),
        )
        rows = cursor.fetchall()
        cursor.close()
        connection.close()
        if rows:
            return [{"options": row[0]} for row in rows]

    else:
        return [
            {"options": log.options}
            for log in session.query(Report).filter(Report.scan_unique_id == scan_id).all()
@@ -397,6 +836,33 @@ def logs_to_report_json(target):
    """
    try:
        session = create_connection()
        if isinstance(session, tuple):
            connection, cursor = session
            return_logs = []

            cursor.execute(
                """
                SELECT scan_unique_id, target, port, event, json_event
                FROM scan_events WHERE target = ?
                """,
                (target,),
            )
            rows = cursor.fetchall()
            cursor.close()
            connection.close()
            if rows:
                for log in rows:
                    data = {
                        "scan_id": log[0],
                        "target": log[1],
                        "port": json.loads(log[2]),
                        "event": json.loads(log[3]),
                        "json_event": json.loads(log[4]),
                    }
                    return_logs.append(data)
            return return_logs

        else:
            return_logs = []
            logs = session.query(HostsLog).filter(HostsLog.target == target)
            for log in logs:
@@ -409,6 +875,7 @@ def logs_to_report_json(target):
                }
                return_logs.append(data)
            return return_logs

    except Exception:
        return []

@@ -427,6 +894,61 @@ def logs_to_report_html(target):
    from nettacker.lib.html_log import log_data

    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session
        cursor.execute(
            """
            SELECT date, target, module_name, scan_unique_id, port, event, json_event
            FROM scan_events
            WHERE target = ?
            """,
            (target,),
        )

        rows = cursor.fetchall()
        cursor.close()
        connection.close()
        logs = [
            {
                "date": log[0],
                "target": log[1],
                "module_name": log[2],
                "scan_id": log[3],
                "port": log[4],
                "event": log[5],
                "json_event": log[6],
            }
            for log in rows
        ]

        html_graph = build_graph("d3_tree_v2_graph", logs)

        html_content = log_data.table_title.format(
            html_graph,
            log_data.css_1,
            "date",
            "target",
            "module_name",
            "scan_id",
            "port",
            "event",
            "json_event",
        )
        for event in logs:
            html_content += log_data.table_items.format(
                event["date"],
                event["target"],
                event["module_name"],
                event["scan_id"],
                event["port"],
                event["event"],
                event["json_event"],
            )
        html_content += (
            log_data.table_end + '<p class="footer">' + messages("nettacker_report") + "</p>"
        )
        return html_content
    else:
        logs = [
            {
                "date": log.date,
@@ -480,8 +1002,88 @@ def search_logs(page, query):
    Returns:
        an array with JSON structure of found events or an empty array
    """
    session = create_connection()
    selected = []
    session = create_connection()
    if isinstance(session, tuple):
        connection, cursor = session
        try:
            # Fetch targets matching the query
            cursor.execute(
                """
                SELECT DISTINCT target FROM scan_events
                WHERE target LIKE ? OR date LIKE ? OR module_name LIKE ?
                    OR port LIKE ? OR event LIKE ? OR scan_unique_id LIKE ?
                ORDER BY id DESC
                LIMIT 10 OFFSET ?
                """,
                (
                    f"%{query}%",
                    f"%{query}%",
                    f"%{query}%",
                    f"%{query}%",
                    f"%{query}%",
                    f"%{query}%",
                    (page * 10) - 10,
                ),
            )
            targets = cursor.fetchall()
            for target_row in targets:
                target = target_row[0]
                # Fetch data for each target grouped by key fields
                cursor.execute(
                    """
                    SELECT date, module_name, port, event, json_event FROM scan_events
                    WHERE target = ?
                    GROUP BY module_name, port, scan_unique_id, event
                    ORDER BY id DESC
                    """,
                    (target,),
                )
                results = cursor.fetchall()

                tmp = {
                    "target": target,
                    "info": {
                        "module_name": [],
                        "port": [],
                        "date": [],
                        "event": [],
                        "json_event": [],
                    },
                }

                for data in results:
                    date, module_name, port, event, json_event = data
                    if module_name not in tmp["info"]["module_name"]:
                        tmp["info"]["module_name"].append(module_name)
                    if date not in tmp["info"]["date"]:
                        tmp["info"]["date"].append(date)
                    parsed_port = json.loads(port)
                    if parsed_port not in tmp["info"]["port"]:
                        tmp["info"]["port"].append(parsed_port)
                    parsed_event = json.loads(event)
                    if parsed_event not in tmp["info"]["event"]:
                        tmp["info"]["event"].append(parsed_event)
                    parsed_json_event = json.loads(json_event)
                    if parsed_json_event not in tmp["info"]["json_event"]:
                        tmp["info"]["json_event"].append(parsed_json_event)

                selected.append(tmp)
            cursor.close()
            connection.close()

        except Exception:
            try:
                cursor.close()
                connection.close()
            except Exception:
                pass
            return structure(status="error", msg="database error!")

        if len(selected) == 0:
            return structure(status="finished", msg="No more search results")
        return selected
    else:
        try:
            for host in (
                session.query(HostsLog)
@@ -543,7 +1145,9 @@ def search_logs(page, query):
                    if data.event not in selected[capture]["info"]["event"]:
                        selected[capture]["info"]["event"].append(json.loads(data.event))
                    if data.json_event not in selected[capture]["info"]["json_event"]:
                        selected[capture]["info"]["json_event"].append(json.loads(data.json_event))
                        selected[capture]["info"]["json_event"].append(
                            json.loads(data.json_event)
                        )
        except Exception:
            return structure(status="error", msg="database error!")
        if len(selected) == 0:
@@ -1093,3 +1093,7 @@ payloads:
    amqp:
      regex: "AMQP"
      reverse: false

    smb:
      regex: "SMB\\d+|Microsoft Windows Network|Server\\sMessage\\sBlock\\sProtocol|\\d{{1,3}}\\.\\d{{1,3}}\\.\\d{{1,3}}\\.\\d{{1,3}}.*?SMB.*?|Session\\sError|Not\\simplemented|Protocol\\sViolation|\\d+\\sbytes\\sreceived|SMB\\sConnection\\sterminated|Session\\sestablished\\susing\\sSMB\\d+|NTLMv2|Negotiate Protocol|SMB2\\sProtocol\\sNegotiation|Session\\sSetup\\sSMB|Tree\\sConnect"
      reverse: false
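For illustration, a hedged sketch of how such a service-detection pattern would match a banner. The doubled braces in the YAML are assumed to be escapes for the module's own `.format()` templating, so they are collapsed to single braces here before compiling; only a subset of the alternation is shown:

```python
import re

# Assumed: the YAML pattern is run through str.format(), so "{{" / "}}"
# become literal braces before the regex is compiled.
pattern = r"SMB\d+|SMB2\sProtocol\sNegotiation|Session\sSetup\sSMB|Tree\sConnect"

banner = "SMB2 Protocol Negotiation"
print(bool(re.search(pattern, banner)))  # True -> banner flagged as SMB (reverse: false)
```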
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.

[[package]]
name = "aiohappyeyeballs"
@@ -140,6 +140,67 @@ files = [
[package.dependencies]
frozenlist = ">=1.1.0"

[[package]]
name = "apsw"
version = "3.50.0.0"
description = "Another Python SQLite Wrapper"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6824df28649514c0efa401ec93d23f44a984a089a6e5d404df90ecd657ea290"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1fd2574eb5cbd63603f37a106d41288c3c6d5eb432278c0fe625014d4c15176"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f7a41dacb3011db2bb0b8b099c1cf7e926590ae6bacb59c0c849dd30d4046db"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2568626790104dafb707c40e4b7c2abe41ba555d4590a7d94460cedee6d7ae"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d176e03b54441b0d7e20d435b655dbf358bbfb15d6b17dd5a8432f04ce9f9bf1"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a242a4b5000b2f1c43374b5a7998e8a87202d3b556eb56f269fbac014d2c294e"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9ee61e4c87d23916e8af2256a99df814f8c8367ce51b26628a6e6cb85f956923"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8f33e04eeab64804defdbf1614b586d9e1d87769a09b7f79cd68c961142682a9"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-win32.whl", hash = "sha256:174dc62afdbf75800b8579ad536e2e189f6b4e1b92ae2e3dbb9d5f583260d6c5"},
|
||||
{file = "apsw-3.50.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5c2fef8008376d42603050b1b9772c61545ede1e8dca3824c948eaafc3e7b2ef"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b05b9d510de3371ec748e9cd1e906bf14ef61f1cd88775358bf3e7a508bac93"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b4abac5dd66bdae85def74f78d66c6d28ed9a1e535b31af38a4d474a6095a444"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7167a3b10c0065eebba1320f333b825a0faff9defc355af3d3519272e7ccb931"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50495f4108084ee24a904c37b902d57530ac4f19cd0918c9af3595febd1bd205"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:130878423946e1595d3cc4aa1f202a0bec4ab64826a9526abb9bbc4c28ed61f9"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f843cf8306ebc1117000c2c09d5abd71b53d040212a01d6e4d0f6891ce666a21"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:91b4d6f1964772b21904833a57971ea01a7149dbaa91792a60d2878c58dfbb1c"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:358ff92550b24cb48a5c2d728ff54ac5627c97d93b632ff718b3d89bd9e63544"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-win32.whl", hash = "sha256:5649e4ef077bd84ef521a09342048c9b86b17d3bec2a0d26e1e1e28be7fa6772"},
|
||||
{file = "apsw-3.50.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:6fb61cffb7aa1a29dfd18179aa9a4eea951c467750b4742e6bf6c69fdaee326c"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0e9f74022b707e4b3e159dc7f29fd03b6f3a526544d71486e1a76ee14d15d940"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:118a06db94c7460bd1b9311cb50298b9b7ebb079f71f3a934e79fc5106981255"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c412050878e9dc70b1ba27da8756a18d6106f13428d185b8d05652c450152d8"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6a77ac6987547ee5a64a477c9d0ba54f89c13068d4661567fc9b8a46f3d6c8a"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1d91b61490c6746cf60782be15437727023a221f528dd8d834bf37925670fc8"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:908e01da909c2f4a24733e37c34ecfdb62ad6d06edcd0a924a9f397a9d878195"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5a379742e773be63b56ee32273eb2a67f63d2076747f967f59a4c35a6f7a0eee"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:014da137aa6f1f1bf31e38b300f68baaa3eb600ddd27aedd9cfbb7fa25d5a3ac"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-win32.whl", hash = "sha256:b80661bc26d68150ad1ee9438f535a6bd1a287b22ceb06e39f4a560691d61348"},
|
||||
{file = "apsw-3.50.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:29e244db28833b0657ba212a598733a2fc3be0b8daea36d389241a91833fdb5c"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:42246a2bd568f7e87f63d4468cced6243914841e61f985ace2c8d903b97bb253"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b79d5913faf188e3968febfe7c0b112290f5f4e8fe0dd100ffb2eda063ef1495"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b70e6599c1aa56558eb1058446d9d313a38042040d137c6f01919d18aac4922"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:320b621e96783af02a4276afca2635ae56ead6d2b4581ffb17e244beb3fc53bb"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b93976b86019e7283c435ded6e6dbe02e46b0838335cafa3d5a1a75a375b663"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a352e97278f8bb135e4015cadf628a5c06515daee8d7b9f51db3160464ee2e99"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:74b8610fdca4dec899f732be64d4723b36305a8d08e3d27a20b9c930a4c28fca"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bee4f3596ee14061fab19381762ee23d4b741ecdf70ab9c2ab917aeb01571f0a"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-win32.whl", hash = "sha256:83830608741210fe229d4c5eb78df6de44eae43f1e76a1e85a4b24150b5b9c3e"},
|
||||
{file = "apsw-3.50.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:d4b9191395483171bff456b63639d8b25f1c6124867d60b66699b4594c7ee46e"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c775641dc754f4ac27d4d8141d21ce90427883e7bfb5ffa9ff83986a7dc190f"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c12306a7e9f3a3542523cf1ad39de41d1b42fcffb9378cb22e43c6b449deb9ae"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1f8e943a4e3fea6d39b404900f25196a5461e256c0af56a63233bb068f80a67"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b66321b5412e85401a4752e0e1a279aba97ca11459037e5c9e4d7437b642802"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20aa18ab216043f3bcf1ea88a4e10500cb197a6ad21e06d3a05fe40282f66020"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e54327aec77bbab8cb9b97b75c660fa1e96181cfa6fe80f34ee45f370ba27b4d"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:676dedd3cabea78e85a636fc4608c9b2e471b78e6dc21a5b8e9c3c99d3bfc0bc"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c22e40b34a67737feae75cd719cdb3cda08c403965acd082d1fc830e9fec031d"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-win32.whl", hash = "sha256:21c815c0edcfa18177eb2f4e0d90a3dff1bf5f5ff03b7a7c23e64e071e4ac49c"},
|
||||
{file = "apsw-3.50.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:4015a5daeba0df446e26ca88d33414e5262c88c6763ac51a5a94ebf48fb2ebcd"},
|
||||
{file = "apsw-3.50.0.0.tar.gz", hash = "sha256:104540af8231b23d01240a341d66fe94fac56bab707fdc159c35e42d354035d0"},
]

[[package]]
name = "argparse"
version = "1.4.0"
@@ -973,7 +1034,7 @@ description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version < \"3.10\""
markers = "python_version == \"3.9\""
files = [
    {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
    {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
@@ -2023,7 +2084,7 @@ files = [
    {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
    {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
markers = {dev = "python_version < \"3.10\""}
markers = {dev = "python_version == \"3.9\""}

[[package]]
name = "urllib3"
@@ -2254,4 +2315,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = "^3.9, <3.13"
content-hash = "0e1731401cd6acfc4d45ede5e18668530aae6a6b2e359d7dc8d8d635635a1257"
content-hash = "d2681b890fa92a4a75406de2521b46047b72668bfb9fd54884454f1caa497191"
@@ -65,7 +65,7 @@ zipp = "^3.19.1"
uvloop = "^0.21.0"
pymysql = "^1.1.1"
impacket = "^0.11.0"
apsw = "^3.50.0.0"

[tool.poetry.group.dev.dependencies]
ipython = "^8.16.1"
ruff = ">=0.2.1,<0.13.0"
@@ -94,6 +94,9 @@ profile = "black"
addopts = "--cov=nettacker --cov-config=pyproject.toml --cov-report term --cov-report xml --dist loadscope --no-cov-on-fail --numprocesses auto"
asyncio_default_fixture_loop_scope = "function"
testpaths = ["tests"]
markers = [
    "asyncio: mark test as async"
]

[tool.ruff]
line-length = 99
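The new `markers` entry registers the `asyncio` marker so pytest does not warn about unknown markers. A minimal illustrative test using it (the test name and body are hypothetical, and running async tests is assumed to rely on the pytest-asyncio plugin implied by `asyncio_default_fixture_loop_scope`):

```python
import asyncio

import pytest


@pytest.mark.asyncio
async def test_example_async_behaviour():
    # Hypothetical async test; the marker matches the "asyncio" entry
    # registered in pyproject.toml above.
    await asyncio.sleep(0)
    assert True
```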
@@ -0,0 +1 @@
<table><graph_html>/*css*/</table>datetargetmodule_nameportlogsjson_event<tr>nowx</tr></table><div id="json_length">1</div><p class="footer">Software Details: OWASP Nettacker version 1.0 [beta] in now ScanID: scan-id</p><script>/*js*/</script>
@@ -60,7 +60,7 @@ def test_load_with_service_discovery(
    mock_loader.return_value = mock_loader_inst

    mock_find_events.return_value = [
        MagicMock(json_event='{"port": 80, "response": {"conditions_results": {"http": {}}}}')
        json.dumps({"port": 80, "response": {"conditions_results": {"http": {}}}})
    ]

    module = Module("test_module", options, **module_args)
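These test updates mirror the database-layer change: `find_events()` now yields JSON strings instead of ORM rows, so the mocks return `json.dumps(...)` payloads directly. A hedged sketch of the equivalence being exercised:

```python
import json
from unittest.mock import MagicMock

event = {"port": 80, "response": {"conditions_results": {"http": {}}}}

# Old mock shape: an ORM-like row whose .json_event attribute held the JSON text.
old_row = MagicMock(json_event=json.dumps(event))
# New mock shape: the JSON text itself, as find_events() now returns it.
new_row = json.dumps(event)

assert json.loads(old_row.json_event) == json.loads(new_row) == event
```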
@@ -94,11 +94,9 @@ def test_sort_loops(mock_loader, mock_find_events, options, module_args):
    }
    mock_loader.return_value = mock_loader_inst

    mock_event = MagicMock()
    mock_event.json_event = json.dumps(
        {"port": 80, "response": {"conditions_results": {"http": True}}}
    )
    mock_find_events.return_value = [mock_event]
    mock_find_events.return_value = [
        json.dumps({"port": 80, "response": {"conditions_results": {"http": True}}})
    ]

    module = Module("test_module", options, **module_args)
    module.libraries = ["http"]
@@ -119,11 +117,9 @@ def test_start_unsupported_library(mock_loader, mock_find_events, options, modul
    }
    mock_loader.return_value = mock_loader_inst

    mock_event = MagicMock()
    mock_event.json_event = json.dumps(
        {"port": 1234, "response": {"conditions_results": {"unsupported_lib": True}}}
    )
    mock_find_events.return_value = [mock_event]
    mock_find_events.return_value = [
        json.dumps({"port": 1234, "response": {"conditions_results": {"unsupported_lib": True}}})
    ]

    module = Module("test_module", options, **module_args)
    module.libraries = ["http"]
@@ -179,11 +175,9 @@ def test_sort_loops_behavior(mock_loader_cls, mock_find_events, mock_parse, opti
    # This one is painful
    mock_loader_cls.side_effect = template_loader_side_effect

    mock_event = MagicMock()
    mock_event.json_event = json.dumps(
        {"port": 80, "response": {"conditions_results": {"http": True}}}
    )
    mock_find_events.return_value = [mock_event]
    mock_find_events.return_value = [
        json.dumps({"port": 80, "response": {"conditions_results": {"http": True}}})
    ]

    module = Module("test_module", options, **module_args)
    module.libraries = ["http"]
@@ -307,12 +301,8 @@ def test_load_appends_port_to_existing_protocol(

    mock_loader_cls.side_effect = loader_side_effect_specific
    mock_find_events.return_value = [
        MagicMock(
            json_event=json.dumps({"port": 80, "response": {"conditions_results": {"http": {}}}})
        ),
        MagicMock(
            json_event=json.dumps({"port": 443, "response": {"conditions_results": {"http": {}}}})
        ),
        json.dumps({"port": 80, "response": {"conditions_results": {"http": {}}}}),
        json.dumps({"port": 443, "response": {"conditions_results": {"http": {}}}}),
    ]

    module = Module("test_module", options, **module_args)
File diff suppressed because it is too large