Merge remote-tracking branch 'origin/automatica-1' into Ufa

main
Sergey Revyakin 4 weeks ago
commit eaacc8e3b7

9
.gitignore vendored

@ -144,6 +144,7 @@ celerybeat.pid
# Environments # Environments
.env .env
.env.bak*
.venv .venv
env/ env/
.venv/ .venv/
@ -188,5 +189,9 @@ runtime/
/.venv-*/* /.venv-*/*
train_scripts/models/* /models/ensemble_*/
train_scripts/models/ensemble_1.2_jpg_*/
NN_server/server.py.bak_streak_gate
*.npy
train_scripts/models/ensemble*/

@ -0,0 +1,197 @@
from torchvision import models
import torch.nn as nn
import matplotlib
import numpy as np
import torch
import cv2
import gc
import io
def _render_plot(values, figsize=(16, 16), dpi=16):
    """Render *values* as a bare black line plot and return it as an image.

    The curve is drawn with a fixed y-range of (-1, 1), axes and margins
    removed, rasterized to PNG in memory and decoded with OpenCV.

    Args:
        values: 1-D sequence of real samples to plot.
        figsize: Matplotlib figure size in inches.
        dpi: Raster resolution; figsize * dpi gives the pixel dimensions.

    Returns:
        np.ndarray of shape (channels, H, W), dtype float32 — one plane per
        colour channel, as produced by cv2.split.

    Raises:
        RuntimeError: if the rendered PNG cannot be decoded back to an image.
    """
    import matplotlib.pyplot as plt

    fig = plt.figure(figsize=figsize)
    try:
        plt.axes(ylim=(-1, 1))
        plt.plot(values, color="black")
        plt.gca().set_axis_off()
        plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
        plt.margins(0, 0)
        buf = io.BytesIO()
        fig.savefig(buf, format="png", dpi=dpi)
        buf.seek(0)
        img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
        buf.close()
        img = cv2.imdecode(img_arr, 1)
        if img is None:
            raise RuntimeError("failed to decode plot image")
    finally:
        # Always release the figure, even on error: the original closed it
        # four redundant ways (clf/cla/close/close(fig)) yet leaked it when
        # savefig or imdecode raised.
        plt.close(fig)
    return np.asarray(cv2.split(img), dtype=np.float32)
def pre_func_ensemble(data=None, src="", ind_inference=0):
    """Turn a (real, imag) sample pair into two plot images for the ensemble.

    Returns [real-part image, magnitude image] as float32 channel arrays,
    or None when preprocessing fails (the error is printed).
    """
    try:
        import matplotlib.pyplot as plt

        matplotlib.use("Agg")
        plt.ioff()
        complex_signal = (
            np.asarray(data[0], dtype=np.float32)
            + 1j * np.asarray(data[1], dtype=np.float32)
        )
        rendered = [
            _render_plot(complex_signal.real),
            _render_plot(np.abs(complex_signal)),
        ]
        cv2.destroyAllWindows()
        gc.collect()
        print("Подготовка данных завершена")
        print()
        return rendered
    except Exception as exc:
        print(str(exc))
        return None
def build_func_ensemble(file_model="", file_config="", num_classes=None):
    """Construct the ResNet18+ResNet50 ensemble and load trained weights.

    Args:
        file_model: path to the state_dict checkpoint to load.
        file_config: unused; kept for the common builder signature.
        num_classes: number of output classes; defaults to 2 when None.

    Returns:
        The ensemble model in eval mode, or None on failure (error printed).
    """
    try:
        import matplotlib.pyplot as plt

        matplotlib.use("Agg")
        plt.ioff()
        torch.cuda.empty_cache()
        # Bug fix: the parameter was previously overwritten with 2
        # unconditionally, silently ignoring the caller's value.
        num_classes = 2 if num_classes is None else int(num_classes)
        model1 = models.resnet18(pretrained=False)
        model2 = models.resnet50(pretrained=False)
        model1.fc = nn.Linear(model1.fc.in_features, num_classes)
        model2.fc = nn.Linear(model2.fc.in_features, num_classes)

        class Ensemble(nn.Module):
            """Two backbones whose logits are concatenated into a linear head."""

            def __init__(self, model1, model2):
                super().__init__()
                self.model1 = model1
                self.model2 = model2
                self.fc = nn.Linear(2 * num_classes, num_classes)

            def forward(self, x):
                # Accept either a [real_img, mag_img] pair or a single
                # tensor fed to both backbones.
                if isinstance(x, (list, tuple)):
                    x1 = x[0]
                    x2 = x[1] if len(x) > 1 else x[0]
                else:
                    x1 = x
                    x2 = x
                y1 = self.model1(x1)
                y2 = self.model2(x2)
                y = torch.cat((y1, y2), dim=1)
                return self.fc(y)

        model = Ensemble(model1, model2)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device != "cpu":
            model = model.to(device)
        model.load_state_dict(torch.load(file_model, map_location=device))
        model.eval()
        cv2.destroyAllWindows()
        gc.collect()
        print("Инициализация модели завершена")
        print()
        return model
    except Exception as exc:
        print(str(exc))
        return None
def inference_func_ensemble(data=None, model=None, mapping=None, shablon=""):
    """Run the ensemble on prepared data and report (label, confidence).

    Returns [prediction, probability] or None on failure (error printed).
    """
    try:
        cv2.destroyAllWindows()
        gc.collect()
        torch.cuda.empty_cache()
        device = "cuda" if torch.cuda.is_available() else "cpu"

        def to_batch(arr):
            # Wrap one sample as a single-element batch on the device.
            return torch.unsqueeze(torch.tensor(arr).cpu(), 0).to(device).float()

        if isinstance(data, (list, tuple)) and len(data) >= 2:
            inputs = [to_batch(data[0]), to_batch(data[1])]
        else:
            single = to_batch(data)
            inputs = [single, single]
        with torch.no_grad():
            output = model(inputs)
        _, predict = torch.max(output.data, 1)
        prediction = mapping[int(np.asarray(predict.cpu())[0])]
        print("PREDICTION" + shablon + ": " + prediction)
        output = output.cpu()
        label = np.asarray(np.argmax(output, axis=1))[0]
        logits = np.asarray(torch.squeeze(output, 0))
        # Numerically stable softmax over the raw logits.
        expon = np.exp(logits - np.max(logits))
        probability = round((expon / expon.sum())[label], 2)
        cv2.destroyAllWindows()
        gc.collect()
        print("Уверенность" + shablon + " в предсказании: " + str(probability))
        print("Инференс завершен")
        print()
        return [prediction, probability]
    except Exception as exc:
        print(str(exc))
        return None
def _save_channel_figure(plt, image, path):
    """Render one (C, H, W) image array to *path* via matplotlib, then free
    the figure and collect garbage."""
    fig, ax = plt.subplots()
    ax.imshow(np.moveaxis(image, 0, -1))
    plt.savefig(path)
    plt.clf()
    plt.cla()
    plt.close(fig)
    cv2.destroyAllWindows()
    gc.collect()


def post_func_ensemble(src="", model_type="", prediction="", model_id=0, ind_inference=0, data=None):
    """Persist the two input images of an inference as PNG snapshots.

    Only the first 100 inferences are saved, and only when *data* holds the
    (real, magnitude) image pair. Always returns None; errors are printed.
    """
    try:
        import matplotlib.pyplot as plt

        matplotlib.use("Agg")
        plt.ioff()
        if int(ind_inference) <= 100 and isinstance(data, (list, tuple)) and len(data) >= 2:
            # The identical save/cleanup sequence was previously duplicated
            # inline for both channels; factored into _save_channel_figure.
            _save_channel_figure(
                plt,
                data[0],
                src + "_inference_" + str(ind_inference) + "_" + prediction + "_real_" + str(model_id) + "_" + model_type + ".png",
            )
            _save_channel_figure(
                plt,
                data[1],
                src + "_inference_" + str(ind_inference) + "_" + prediction + "_mod_" + str(model_id) + "_" + model_type + ".png",
            )
        plt.clf()
        plt.cla()
        plt.close()
        cv2.destroyAllWindows()
        gc.collect()
        print("Постобработка завершена")
        print()
    except Exception as exc:
        print(str(exc))
        return None

@ -0,0 +1,197 @@
from torchvision import models
import torch.nn as nn
import matplotlib
import numpy as np
import torch
import cv2
import gc
import io
def _render_plot(values, figsize=(16, 16), dpi=16):
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
plt.axes(ylim=(-1, 1))
plt.plot(values, color="black")
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
if img is None:
raise RuntimeError("failed to decode plot image")
plt.clf()
plt.cla()
plt.close()
plt.close(fig)
return np.asarray(cv2.split(img), dtype=np.float32)
def pre_func_ensemble(data=None, src="", ind_inference=0):
try:
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.ioff()
real = np.asarray(data[0], dtype=np.float32)
imag = np.asarray(data[1], dtype=np.float32)
signal = real + 1j * imag
img_real = _render_plot(signal.real)
img_mag = _render_plot(np.abs(signal))
cv2.destroyAllWindows()
gc.collect()
print("Подготовка данных завершена")
print()
return [img_real, img_mag]
except Exception as exc:
print(str(exc))
return None
def build_func_ensemble(file_model="", file_config="", num_classes=None):
try:
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.ioff()
torch.cuda.empty_cache()
num_classes = 2
model1 = models.resnet18(pretrained=False)
model2 = models.resnet50(pretrained=False)
model1.fc = nn.Linear(model1.fc.in_features, num_classes)
model2.fc = nn.Linear(model2.fc.in_features, num_classes)
class Ensemble(nn.Module):
def __init__(self, model1, model2):
super().__init__()
self.model1 = model1
self.model2 = model2
self.fc = nn.Linear(2 * num_classes, num_classes)
def forward(self, x):
if isinstance(x, (list, tuple)):
x1 = x[0]
x2 = x[1] if len(x) > 1 else x[0]
else:
x1 = x
x2 = x
y1 = self.model1(x1)
y2 = self.model2(x2)
y = torch.cat((y1, y2), dim=1)
return self.fc(y)
model = Ensemble(model1, model2)
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cpu":
model = model.to(device)
model.load_state_dict(torch.load(file_model, map_location=device))
model.eval()
cv2.destroyAllWindows()
gc.collect()
print("Инициализация модели завершена")
print()
return model
except Exception as exc:
print(str(exc))
return None
def inference_func_ensemble(data=None, model=None, mapping=None, shablon=""):
try:
cv2.destroyAllWindows()
gc.collect()
torch.cuda.empty_cache()
device = "cuda" if torch.cuda.is_available() else "cpu"
if isinstance(data, (list, tuple)) and len(data) >= 2:
inputs = [
torch.unsqueeze(torch.tensor(data[0]).cpu(), 0).to(device).float(),
torch.unsqueeze(torch.tensor(data[1]).cpu(), 0).to(device).float(),
]
else:
tensor = torch.unsqueeze(torch.tensor(data).cpu(), 0).to(device).float()
inputs = [tensor, tensor]
with torch.no_grad():
output = model(inputs)
_, predict = torch.max(output.data, 1)
prediction = mapping[int(np.asarray(predict.cpu())[0])]
print("PREDICTION" + shablon + ": " + prediction)
output = output.cpu()
label = np.asarray(np.argmax(output, axis=1))[0]
output = np.asarray(torch.squeeze(output, 0))
expon = np.exp(output - np.max(output))
probability = round((expon / expon.sum())[label], 2)
cv2.destroyAllWindows()
gc.collect()
print("Уверенность" + shablon + " в предсказании: " + str(probability))
print("Инференс завершен")
print()
return [prediction, probability]
except Exception as exc:
print(str(exc))
return None
def post_func_ensemble(src="", model_type="", prediction="", model_id=0, ind_inference=0, data=None):
try:
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.ioff()
if int(ind_inference) <= 100 and isinstance(data, (list, tuple)) and len(data) >= 2:
fig, ax = plt.subplots()
ax.imshow(np.moveaxis(data[0], 0, -1))
plt.savefig(src + "_inference_" + str(ind_inference) + "_" + prediction + "_real_" + str(model_id) + "_" + model_type + ".png")
plt.clf()
plt.cla()
plt.close(fig)
cv2.destroyAllWindows()
gc.collect()
fig, ax = plt.subplots()
ax.imshow(np.moveaxis(data[1], 0, -1))
plt.savefig(src + "_inference_" + str(ind_inference) + "_" + prediction + "_mod_" + str(model_id) + "_" + model_type + ".png")
plt.clf()
plt.cla()
plt.close(fig)
cv2.destroyAllWindows()
gc.collect()
plt.clf()
plt.cla()
plt.close()
cv2.destroyAllWindows()
gc.collect()
print("Постобработка завершена")
print()
except Exception as exc:
print(str(exc))
return None

@ -0,0 +1,197 @@
from torchvision import models
import torch.nn as nn
import matplotlib
import numpy as np
import torch
import cv2
import gc
import io
def _render_plot(values, figsize=(16, 16), dpi=16):
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
plt.axes(ylim=(-1, 1))
plt.plot(values, color="black")
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
if img is None:
raise RuntimeError("failed to decode plot image")
plt.clf()
plt.cla()
plt.close()
plt.close(fig)
return np.asarray(cv2.split(img), dtype=np.float32)
def pre_func_ensemble(data=None, src="", ind_inference=0):
try:
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.ioff()
real = np.asarray(data[0], dtype=np.float32)
imag = np.asarray(data[1], dtype=np.float32)
signal = real + 1j * imag
img_real = _render_plot(signal.real)
img_mag = _render_plot(np.abs(signal))
cv2.destroyAllWindows()
gc.collect()
print("Подготовка данных завершена")
print()
return [img_real, img_mag]
except Exception as exc:
print(str(exc))
return None
def build_func_ensemble(file_model="", file_config="", num_classes=None):
try:
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.ioff()
torch.cuda.empty_cache()
num_classes = 2
model1 = models.resnet18(pretrained=False)
model2 = models.resnet50(pretrained=False)
model1.fc = nn.Linear(model1.fc.in_features, num_classes)
model2.fc = nn.Linear(model2.fc.in_features, num_classes)
class Ensemble(nn.Module):
def __init__(self, model1, model2):
super().__init__()
self.model1 = model1
self.model2 = model2
self.fc = nn.Linear(2 * num_classes, num_classes)
def forward(self, x):
if isinstance(x, (list, tuple)):
x1 = x[0]
x2 = x[1] if len(x) > 1 else x[0]
else:
x1 = x
x2 = x
y1 = self.model1(x1)
y2 = self.model2(x2)
y = torch.cat((y1, y2), dim=1)
return self.fc(y)
model = Ensemble(model1, model2)
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cpu":
model = model.to(device)
model.load_state_dict(torch.load(file_model, map_location=device))
model.eval()
cv2.destroyAllWindows()
gc.collect()
print("Инициализация модели завершена")
print()
return model
except Exception as exc:
print(str(exc))
return None
def inference_func_ensemble(data=None, model=None, mapping=None, shablon=""):
try:
cv2.destroyAllWindows()
gc.collect()
torch.cuda.empty_cache()
device = "cuda" if torch.cuda.is_available() else "cpu"
if isinstance(data, (list, tuple)) and len(data) >= 2:
inputs = [
torch.unsqueeze(torch.tensor(data[0]).cpu(), 0).to(device).float(),
torch.unsqueeze(torch.tensor(data[1]).cpu(), 0).to(device).float(),
]
else:
tensor = torch.unsqueeze(torch.tensor(data).cpu(), 0).to(device).float()
inputs = [tensor, tensor]
with torch.no_grad():
output = model(inputs)
_, predict = torch.max(output.data, 1)
prediction = mapping[int(np.asarray(predict.cpu())[0])]
print("PREDICTION" + shablon + ": " + prediction)
output = output.cpu()
label = np.asarray(np.argmax(output, axis=1))[0]
output = np.asarray(torch.squeeze(output, 0))
expon = np.exp(output - np.max(output))
probability = round((expon / expon.sum())[label], 2)
cv2.destroyAllWindows()
gc.collect()
print("Уверенность" + shablon + " в предсказании: " + str(probability))
print("Инференс завершен")
print()
return [prediction, probability]
except Exception as exc:
print(str(exc))
return None
def post_func_ensemble(src="", model_type="", prediction="", model_id=0, ind_inference=0, data=None):
try:
import matplotlib.pyplot as plt
matplotlib.use("Agg")
plt.ioff()
if int(ind_inference) <= 100 and isinstance(data, (list, tuple)) and len(data) >= 2:
fig, ax = plt.subplots()
ax.imshow(np.moveaxis(data[0], 0, -1))
plt.savefig(src + "_inference_" + str(ind_inference) + "_" + prediction + "_real_" + str(model_id) + "_" + model_type + ".png")
plt.clf()
plt.cla()
plt.close(fig)
cv2.destroyAllWindows()
gc.collect()
fig, ax = plt.subplots()
ax.imshow(np.moveaxis(data[1], 0, -1))
plt.savefig(src + "_inference_" + str(ind_inference) + "_" + prediction + "_mod_" + str(model_id) + "_" + model_type + ".png")
plt.clf()
plt.cla()
plt.close(fig)
cv2.destroyAllWindows()
gc.collect()
plt.clf()
plt.cla()
plt.close()
cv2.destroyAllWindows()
gc.collect()
print("Постобработка завершена")
print()
except Exception as exc:
print(str(exc))
return None

@ -3,6 +3,7 @@ from dotenv import dotenv_values
from common.runtime import load_root_env, validate_env, as_int, as_str from common.runtime import load_root_env, validate_env, as_int, as_str
import os import os
import sys import sys
import re
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from Model import Model from Model import Model
import numpy as np import numpy as np
@ -49,13 +50,47 @@ validate_env("NN_server/server.py", {
}) })
config = dict(dotenv_values(ROOT_ENV)) config = dict(dotenv_values(ROOT_ENV))
def is_model_config_key(key, value):
    """Return True when (key, value) looks like an NN_<n> model config entry
    whose value is a ' && '-separated parameter string."""
    if not isinstance(value, str):
        return False
    if " && " not in value:
        return False
    return bool(re.fullmatch(r"NN_\d+", key or ""))
def get_required_drone_streak(freq):
    """Minimum number of consecutive 'drone' predictions required for *freq*.

    Reads DRONE_STREAK_<freq> from the loaded config; malformed or missing
    values fall back to 1 (with a warning for malformed ones).
    """
    raw_value = config.get(f"DRONE_STREAK_{freq}", "1")
    try:
        parsed = int(raw_value)
    except (TypeError, ValueError):
        logging.warning("Invalid DRONE_STREAK_%s=%r, falling back to 1", freq, raw_value)
        return 1
    return parsed if parsed > 1 else 1
def update_drone_streak(freq, prediction):
    """Update the per-frequency drone streak and return the alarm amplitude.

    A 'drone' prediction extends the streak for *freq*; anything else resets
    it. Returns 8 once the streak reaches the configured threshold, else 0.
    """
    is_drone = prediction == "drone"
    drone_streaks[freq] = (drone_streaks.get(freq, 0) + 1) if is_drone else 0
    required = get_required_drone_streak(freq)
    triggered = is_drone and drone_streaks[freq] >= required
    logging.info(
        "NN alarm gate freq=%s prediction=%s streak=%s/%s triggered=%s",
        freq,
        prediction,
        drone_streaks[freq],
        required,
        triggered,
    )
    return 8 if triggered else 0
if not config: if not config:
raise RuntimeError("[NN_server/server.py] .env was loaded but no keys were parsed") raise RuntimeError("[NN_server/server.py] .env was loaded but no keys were parsed")
if not any(key.startswith("NN_") for key in config): if not any(is_model_config_key(key, value) for key, value in config.items()):
raise RuntimeError("[NN_server/server.py] no NN_* model entries configured") raise RuntimeError("[NN_server/server.py] no NN_* model entries configured")
logging.info("NN config loaded from %s", ROOT_ENV) logging.info("NN config loaded from %s", ROOT_ENV)
gen_server_ip = config['GENERAL_SERVER_IP'] gen_server_ip = config['GENERAL_SERVER_IP']
gen_server_port = config['GENERAL_SERVER_PORT'] gen_server_port = config['GENERAL_SERVER_PORT']
drone_streaks = {}
def init_data_for_inference(): def init_data_for_inference():
try: try:
@ -71,9 +106,9 @@ def init_data_for_inference():
try: try:
global model_list global model_list
for key in config.keys(): for key, value in config.items():
if key.startswith('NN_'): if is_model_config_key(key, value):
params = config[key].split(' && ') params = value.split(' && ')
module = importlib.import_module('Models.' + params[4]) module = importlib.import_module('Models.' + params[4])
classes = {} classes = {}
for value in params[9][1:-1].split(','): for value in params[9][1:-1].split(','):
@ -137,13 +172,7 @@ def receive_data():
print() print()
try: try:
result = 0 result = update_drone_streak(freq, prediction_list[0])
if (int(freq) == 2400 and (prediction_list[0] in ['drone', 'drone_noise'] or (prediction_list[0] == 'wifi' and float(probability) >= 0.95))) or (int(freq) == 1200 and (prediction_list[0] in ['drone'] and float(probability) >= 0.95)):
result += 8
if int(freq) in [915]:
result = 0
if int(freq) in []:
result = 8
data_to_send={ data_to_send={
'freq': str(freq), 'freq': str(freq),
'amplitude': result 'amplitude': result

@ -33,6 +33,8 @@ services:
- ../../common:/app/common - ../../common:/app/common
networks: networks:
- dronedetector-net - dronedetector-net
extra_hosts:
- "host.docker.internal:host-gateway"
dronedetector-nn-server: dronedetector-nn-server:
container_name: dronedetector-nn-server container_name: dronedetector-nn-server
@ -59,6 +61,7 @@ services:
- ../../.env:/app/.env:ro - ../../.env:/app/.env:ro
- ../../NN_server:/app/NN_server - ../../NN_server:/app/NN_server
- ../../common:/app/common - ../../common:/app/common
- ../../train_scripts:/app/train_scripts:ro
gpus: all gpus: all
networks: networks:
- dronedetector-net - dronedetector-net

@ -35,6 +35,405 @@ except Exception as exc:
EPS = 1e-20 EPS = 1e-20
@dataclass
class ScanWindow:
    """State for one retune window of the wide energy sweep."""

    seq: int  # 1-based position of the window within the sweep
    start_mhz: float  # edge where this window starts (sweep-direction aware)
    end_mhz: float  # edge where this window ends
    low_mhz: float  # min(start_mhz, end_mhz)
    high_mhz: float  # max(start_mhz, end_mhz)
    center_mhz: float  # tuning frequency used for this window
    status: str = "INIT"  # INIT until measured, then OK or ERR
    rms: Optional[float] = None  # RMS of complex samples (linear, relative)
    power_lin: Optional[float] = None  # mean |x|^2 (linear, relative to full scale)
    dbfs: Optional[float] = None  # 10*log10(power_lin)
    samples: int = 0  # sample count behind the last averaged reading
    updated_at: float = 0.0  # wall-clock time of the last update (0 = never)
    error: str = ""  # last error message; empty when OK
    pass_no: int = 0  # sweep pass on which the last OK reading was taken
class WideProbeTop(gr.top_block):
    """GNU Radio flowgraph: one HackRF source feeding a vector signal probe.

    The source is retuned across the sweep via tune(); read_window()
    averages several probe snapshots into one relative-power measurement.
    """

    def __init__(
        self,
        index: int,
        center_freq_hz: float,
        sample_rate: float,
        vec_len: int,
        gain: float,
        if_gain: float,
        bb_gain: float,
    ):
        super().__init__("hackrf_energy_wide_probe")
        self.probe = blocks.probe_signal_vc(vec_len)
        self.stream_to_vec = blocks.stream_to_vector(gr.sizeof_gr_complex * 1, vec_len)
        self.src = osmosdr.source(args=f"numchan=1 hackrf={index}")
        self.src.set_time_unknown_pps(osmosdr.time_spec_t())
        self.src.set_sample_rate(sample_rate)
        self.src.set_center_freq(center_freq_hz, 0)
        # The osmosdr calls below are best-effort: not every driver build
        # supports them, so failures are deliberately swallowed.
        try:
            self.src.set_freq_corr(0, 0)
        except Exception:
            pass
        try:
            self.src.set_gain_mode(False, 0)
        except Exception:
            pass
        for fn, val in (("set_gain", gain), ("set_if_gain", if_gain), ("set_bb_gain", bb_gain)):
            try:
                getattr(self.src, fn)(val, 0)
            except Exception:
                pass
        try:
            self.src.set_bandwidth(0, 0)
        except Exception:
            pass
        try:
            self.src.set_antenna("", 0)
        except Exception:
            pass
        # source -> stream_to_vector -> probe
        self.connect((self.src, 0), (self.stream_to_vec, 0))
        self.connect((self.stream_to_vec, 0), (self.probe, 0))

    def tune(self, freq_hz: float) -> None:
        # Retune the source; settling time is handled by the caller.
        self.src.set_center_freq(freq_hz, 0)

    def read_metrics(self) -> Tuple[float, float, float, int]:
        """Take one probe snapshot; return (rms, power_lin, dbfs, n_samples).

        Raises RuntimeError when the probe has produced no samples yet.
        """
        arr = np.asarray(self.probe.level(), dtype=np.complex64)
        if arr.size == 0:
            raise RuntimeError("no samples")
        power_lin = float(np.mean(arr.real * arr.real + arr.imag * arr.imag))
        rms = math.sqrt(max(power_lin, 0.0))
        # EPS guards log10(0) on an all-zero snapshot.
        dbfs = 10.0 * math.log10(max(power_lin, EPS))
        return rms, power_lin, dbfs, int(arr.size)

    def read_window(self, settle: float, avg_reads: int, pause_between_reads: float) -> Tuple[float, float, float, int]:
        """Average several snapshots after an optional settle delay.

        Each snapshot is retried for up to ~1 s while the probe is empty;
        a persistent failure is re-raised as RuntimeError. Returns the
        averaged (rms, power_lin, dbfs, mean_sample_count).
        """
        if settle > 0:
            time.sleep(settle)
        read_count = max(1, avg_reads)
        powers: List[float] = []
        sample_sizes: List[int] = []
        last_error: Optional[Exception] = None
        for idx in range(read_count):
            deadline = time.time() + 1.0
            while True:
                try:
                    _, power_lin, _, samples = self.read_metrics()
                    powers.append(power_lin)
                    sample_sizes.append(samples)
                    break
                except Exception as exc:
                    last_error = exc
                    if time.time() >= deadline:
                        raise RuntimeError(str(last_error) if last_error else "no samples")
                    time.sleep(0.02)
            if idx + 1 < read_count and pause_between_reads > 0:
                time.sleep(pause_between_reads)
        # Average in the linear power domain, then derive rms/dBFS once.
        power_lin = float(sum(powers) / len(powers))
        rms = math.sqrt(max(power_lin, 0.0))
        dbfs = 10.0 * math.log10(max(power_lin, EPS))
        samples = int(sum(sample_sizes) / len(sample_sizes))
        return rms, power_lin, dbfs, samples
def parse_hackrf_info() -> Dict[str, int]:
    """Map HackRF serial numbers (lowercase hex) to device indices.

    Runs the `hackrf_info` utility and pairs each `Index:` line with the
    `Serial number:` line that follows it.

    Raises:
        RuntimeError: when the tool is missing, times out, or lists nothing.
    """
    try:
        proc = subprocess.run(["hackrf_info"], capture_output=True, text=True, timeout=15)
    except FileNotFoundError:
        raise RuntimeError("hackrf_info not found")
    except subprocess.TimeoutExpired:
        raise RuntimeError("hackrf_info timeout")
    combined = (proc.stdout or "") + "\n" + (proc.stderr or "")
    mapping: Dict[str, int] = {}
    pending_index: Optional[int] = None
    for raw_line in combined.splitlines():
        index_match = re.search(r"^Index:\s*(\d+)", raw_line)
        if index_match:
            pending_index = int(index_match.group(1))
            continue
        serial_match = re.search(r"^Serial number:\s*([0-9a-fA-F]+)", raw_line)
        if serial_match and pending_index is not None:
            mapping[serial_match.group(1).lower()] = pending_index
    if not mapping:
        raise RuntimeError("no devices parsed from hackrf_info")
    return mapping
def fmt(value: Optional[float], spec: str) -> str:
    """Format *value* with format-spec *spec*, rendering None as a dash."""
    if value is None:
        return "-"
    return format(value, spec)
def build_windows(base_mhz: float, roof_mhz: float, step_mhz: float) -> List[ScanWindow]:
    """Split the span from base to roof (either direction) into step-wide windows.

    Windows are generated from base towards roof; the final window's far
    edge is clamped so it lands exactly on roof.

    Raises:
        ValueError: when step is not positive or base equals roof.
    """
    if step_mhz <= 0:
        raise ValueError("step must be > 0")
    if base_mhz == roof_mhz:
        raise ValueError("base and roof must be different")
    sweep_down = roof_mhz < base_mhz
    signed_step = -step_mhz if sweep_down else step_mhz
    windows: List[ScanWindow] = []
    edge = base_mhz
    seq = 1
    while True:
        far_edge = edge + signed_step
        # Clamp the last window so it ends exactly on the roof edge.
        if (sweep_down and far_edge < roof_mhz) or (not sweep_down and far_edge > roof_mhz):
            far_edge = roof_mhz
        lo = min(edge, far_edge)
        hi = max(edge, far_edge)
        windows.append(
            ScanWindow(
                seq=seq,
                start_mhz=edge,
                end_mhz=far_edge,
                low_mhz=lo,
                high_mhz=hi,
                center_mhz=(lo + hi) / 2.0,
            )
        )
        if far_edge == roof_mhz:
            break
        edge = far_edge
        seq += 1
    return windows
def render(
    windows: List[ScanWindow],
    serial: str,
    index: int,
    sample_rate: float,
    base_mhz: float,
    roof_mhz: float,
    step_mhz: float,
    started_at: float,
    pass_no: int,
    current_seq: int,
) -> None:
    """Redraw the sweep status table on stdout.

    Clears the terminal via ANSI codes, prints a banner, one row per scan
    window, then a MAX summary row: the strongest OK window seen so far,
    or the current window as a placeholder before any window succeeds.
    """
    now = time.time()
    capture_bw_mhz = sample_rate / 1e6
    current_row = next((row for row in windows if row.seq == current_seq), None)
    # Strongest successfully measured window across all passes so far.
    best_row = max(
        (row for row in windows if row.status == "OK" and row.dbfs is not None),
        key=lambda row: row.dbfs if row.dbfs is not None else float("-inf"),
        default=None,
    )
    # ANSI escape: clear screen, move cursor to home.
    print("\x1b[2J\x1b[H", end="")
    print("HackRF Wide Energy Monitor (relative power: RMS / linear / dBFS)")
    print(
        f"serial: {serial} | idx: {index} | sample-rate: {capture_bw_mhz:.3f} MHz | "
        f"scan: {base_mhz:.3f}->{roof_mhz:.3f} MHz step {step_mhz:.3f} MHz | "
        f"pass: {pass_no} | uptime: {int(now-started_at)}s | {time.strftime('%Y-%m-%d %H:%M:%S')}"
    )
    print()
    header = (
        f"{'cur':>3} {'seq':>3} {'window MHz':>23} {'center':>9} {'status':>8} "
        f"{'dBFS':>9} {'rms':>10} {'power':>12} {'N':>5} {'age':>5} error"
    )
    print(header)
    print("-" * len(header))
    for row in windows:
        # Age of the last measurement in seconds; "-" if never measured.
        age = "-" if row.updated_at <= 0 else f"{(now-row.updated_at):.1f}"
        err = row.error
        # Truncate long error messages to keep the row on one line.
        if len(err) > 50:
            err = err[:47] + "..."
        marker = ">>>" if row.seq == current_seq else ""
        print(
            f"{marker:>3} {row.seq:>3} "
            f"{f'{row.high_mhz:.3f}-{row.low_mhz:.3f}':>23} {row.center_mhz:>9.3f} {row.status:>8} "
            f"{fmt(row.dbfs, '.2f'):>9} {fmt(row.rms, '.6f'):>10} {fmt(row.power_lin, '.8f'):>12} "
            f"{row.samples:>5} {age:>5} {err}"
        )
    print()
    if best_row is not None:
        best_age = "-" if best_row.updated_at <= 0 else f"{(now-best_row.updated_at):.1f}"
        print(
            f"{'':>3} {'MAX':>3} "
            f"{f'{best_row.high_mhz:.3f}-{best_row.low_mhz:.3f}':>23} {best_row.center_mhz:>9.3f} {best_row.status:>8} "
            f"{fmt(best_row.dbfs, '.2f'):>9} {fmt(best_row.rms, '.6f'):>10} {fmt(best_row.power_lin, '.8f'):>12} "
            f"{best_row.samples:>5} {best_age:>5} pass={best_row.pass_no}"
        )
    elif current_row is not None:
        # No successful window yet: show the current window as a placeholder.
        current_age = "-" if current_row.updated_at <= 0 else f"{(now-current_row.updated_at):.1f}"
        print(
            f"{'':>3} {'MAX':>3} "
            f"{f'{current_row.high_mhz:.3f}-{current_row.low_mhz:.3f}':>23} {current_row.center_mhz:>9.3f} {'INIT':>8} "
            f"{fmt(None, '.2f'):>9} {fmt(None, '.6f'):>10} {fmt(None, '.8f'):>12} "
            f"{0:>5} {current_age:>5} no successful windows yet"
        )
    print("Ctrl+C to stop. Window width equals step; sample-rate must be >= step to cover each window.")
    sys.stdout.flush()
def build_parser() -> argparse.ArgumentParser:
    """Create the CLI argument parser for the wide-scan energy monitor."""
    cli = argparse.ArgumentParser(description="Retune one HackRF across a wide frequency range and measure energy")
    # Device selection and capture parameters.
    cli.add_argument("--serial", required=True, help="HackRF serial number from hackrf_info")
    cli.add_argument("--sample-rate", type=float, required=True, help="Sample rate in Hz")
    # Sweep geometry.
    cli.add_argument("--base", type=float, required=True, help="Scan start edge in MHz")
    cli.add_argument("--roof", type=float, required=True, help="Scan end edge in MHz")
    cli.add_argument("--step", type=float, required=True, help="Window width / retune step in MHz")
    # Measurement behaviour.
    cli.add_argument("--vec-len", type=int, default=4096, help="Probe vector length")
    cli.add_argument("--settle", type=float, default=0.12, help="Wait time after retune before reading (s)")
    cli.add_argument("--avg-reads", type=int, default=3, help="How many probe reads to average per window")
    cli.add_argument("--pause-between-reads", type=float, default=0.02, help="Pause between averaged reads (s)")
    cli.add_argument("--passes", type=int, default=0, help="Number of sweep passes, 0 means infinite")
    # Gain stages.
    cli.add_argument("--gain", type=float, default=16.0, help="General gain")
    cli.add_argument("--if-gain", type=float, default=16.0, help="IF gain")
    cli.add_argument("--bb-gain", type=float, default=16.0, help="BB gain")
    return cli
def main() -> int:
    """Entry point: validate arguments, find the device, run sweep passes.

    Returns a process exit code: 0 ok, 2 invalid arguments, 3 device
    discovery failure, 4 unknown serial, 5 runtime scanner failure.
    """
    args = build_parser().parse_args()
    serial = args.serial.lower()
    try:
        windows = build_windows(args.base, args.roof, args.step)
    except ValueError as exc:
        print(f"invalid scan range: {exc}", file=sys.stderr)
        return 2
    step_hz = args.step * 1e6
    # Each window is one step wide; a narrower capture would leave gaps.
    if args.sample_rate < step_hz:
        print(
            f"sample-rate {args.sample_rate:.0f} Hz is smaller than step window {step_hz:.0f} Hz; "
            "this would leave gaps in the scan",
            file=sys.stderr,
        )
        return 2
    try:
        serial_to_index = parse_hackrf_info()
    except Exception as exc:
        print(f"hackrf discovery failed: {exc}", file=sys.stderr)
        return 3
    index = serial_to_index.get(serial)
    if index is None:
        print(f"serial {serial} not found in hackrf_info", file=sys.stderr)
        print("available serials:", file=sys.stderr)
        for item_serial, item_index in sorted(serial_to_index.items(), key=lambda item: item[1]):
            print(f" idx={item_index} serial={item_serial}", file=sys.stderr)
        return 4
    stop_requested = False

    def on_signal(signum, frame):
        # Ask the sweep loop to exit at the next safe point.
        nonlocal stop_requested
        stop_requested = True

    signal.signal(signal.SIGINT, on_signal)
    signal.signal(signal.SIGTERM, on_signal)
    probe: Optional[WideProbeTop] = None
    started_at = time.time()
    pass_no = 0
    current_seq = windows[0].seq
    try:
        probe = WideProbeTop(
            index=index,
            center_freq_hz=windows[0].center_mhz * 1e6,
            sample_rate=args.sample_rate,
            vec_len=args.vec_len,
            gain=args.gain,
            if_gain=args.if_gain,
            bb_gain=args.bb_gain,
        )
        probe.start()
        # Give the flowgraph a moment to produce its first samples.
        time.sleep(max(args.settle, 0.12))
        while not stop_requested:
            pass_no += 1
            for row in windows:
                if stop_requested:
                    break
                current_seq = row.seq
                try:
                    probe.tune(row.center_mhz * 1e6)
                    rms, power_lin, dbfs, samples = probe.read_window(
                        settle=args.settle,
                        avg_reads=args.avg_reads,
                        pause_between_reads=args.pause_between_reads,
                    )
                    row.status = "OK"
                    row.rms = rms
                    row.power_lin = power_lin
                    row.dbfs = dbfs
                    row.samples = samples
                    row.error = ""
                    row.updated_at = time.time()
                    row.pass_no = pass_no
                except Exception as exc:
                    # Keep sweeping: record the failure on this row only.
                    row.status = "ERR"
                    row.error = str(exc)
                    row.updated_at = time.time()
                # Redraw the table after every window measurement.
                render(
                    windows=windows,
                    serial=serial,
                    index=index,
                    sample_rate=args.sample_rate,
                    base_mhz=args.base,
                    roof_mhz=args.roof,
                    step_mhz=args.step,
                    started_at=started_at,
                    pass_no=pass_no,
                    current_seq=current_seq,
                )
            if args.passes > 0 and pass_no >= args.passes:
                break
    except Exception as exc:
        print(f"scanner failed: {exc}", file=sys.stderr)
        return 5
    finally:
        # Always stop the flowgraph, even on error or Ctrl+C.
        if probe is not None:
            try:
                probe.stop()
                probe.wait()
            except Exception:
                pass
    return 0
if __name__ == "__main__":
raise SystemExit(main())
#!/usr/bin/env python3
import argparse
import math
import re
import signal
import subprocess
import sys
import time
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
try:
import numpy as np
except Exception as exc:
print(f"numpy import failed: {exc}", file=sys.stderr)
sys.exit(1)
try:
from gnuradio import blocks, gr
import osmosdr
except Exception as exc:
print(f"gnuradio/osmosdr import failed: {exc}", file=sys.stderr)
print("Run with the SDR venv, e.g. .venv-sdr/bin/python read_energy_wide.py", file=sys.stderr)
sys.exit(1)
EPS = 1e-20
@dataclass @dataclass
class ScanWindow: class ScanWindow:
seq: int seq: int

@ -73,7 +73,6 @@ send_to_jammer_flag = as_bool(os.getenv('send_to_jammer_flag', '0'))
latitude = float(os.getenv('latitude')) latitude = float(os.getenv('latitude'))
longitude = float(os.getenv('longitude')) longitude = float(os.getenv('longitude'))
i = 0
flag = 0 flag = 0
max_len_bulk = 1 max_len_bulk = 1
bulk_data = [] bulk_data = []
@ -98,6 +97,32 @@ freqs_alarm = {freq: 0 for freq in freqs}
# 4. Добавить print, только если deub_module_flag. # 4. Добавить print, только если deub_module_flag.
def ensure_sending_data_task():
global sending_data_task
if sending_data_task is None or sending_data_task.done():
sending_data_task = asyncio.create_task(sending_data())
async def stop_sending_data_task():
global sending_data_task
if sending_data_task is None:
return
task = sending_data_task
sending_data_task = None
if task.done():
return
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
############################################################################ ############################################################################
# GPS MODULE - INACTIVE # GPS MODULE - INACTIVE
############################################################################ ############################################################################
@ -299,13 +324,10 @@ async def sending_data():
от текущего статуса тревоги (аларм/не аларм). от текущего статуса тревоги (аларм/не аларм).
""" """
global i
global alarm global alarm
global jammer_event global jammer_event
if i == 0:
while True: while True:
i=1
print('while true!') print('while true!')
ModuleDataSingleV2 = await agregate_data(deepcopy(data_queue)) ModuleDataSingleV2 = await agregate_data(deepcopy(data_queue))
if send_to_master_flag: if send_to_master_flag:
@ -315,8 +337,8 @@ async def sending_data():
# Если перед отправкой на мастер все было чисто, то ждем 60 сек. # Если перед отправкой на мастер все было чисто, то ждем 60 сек.
# Если во время этих 60 сек. пришел пакет с алармом, то рассматриваем ситуации: # Если во время этих 60 сек. пришел пакет с алармом, то рассматриваем ситуации:
if not alarm: if not alarm:
for i in range(passive_interval_to_send, 0, -1): for countdown in range(passive_interval_to_send, 0, -1):
print('ТАЙМЕР ', i) print('ТАЙМЕР ', countdown)
await asyncio.sleep(1) await asyncio.sleep(1)
if alarm: if alarm:
break break
@ -336,7 +358,6 @@ async def sending_data():
# В случае аларма ждем секунду перед новой отправкой данных. # В случае аларма ждем секунду перед новой отправкой данных.
if alarm: if alarm:
await asyncio.sleep(active_interval_to_send) await asyncio.sleep(active_interval_to_send)
i = 0
@app.post('/waterfall') @app.post('/waterfall')
@ -410,10 +431,8 @@ async def jammer_active():
global jammer_event global jammer_event
global freqs_alarm global freqs_alarm
global sending_data_task
if sending_data_task is not None: await stop_sending_data_task()
sending_data_task.cancel()
freqs_alarm = {freq: 0 for freq in freqs} freqs_alarm = {freq: 0 for freq in freqs}
jammer_event = True jammer_event = True
@ -437,11 +456,10 @@ async def jammer_deactive():
global jammer_event global jammer_event
global alarm global alarm
global sending_data_task
alarm = False alarm = False
jammer_event = False jammer_event = False
set_jammer_active(False) set_jammer_active(False)
sending_data_task = asyncio.create_task(sending_data()) ensure_sending_data_task()
print('ОТКЛЮАЕМ ПОДАВИТЕЛЬ ААААААААААААААААААААААААААААААААААААААААААААААААА!!!!') print('ОТКЛЮАЕМ ПОДАВИТЕЛЬ ААААААААААААААААААААААААААААААААААААААААААААААААА!!!!')
print('-' * 20) print('-' * 20)
@ -493,9 +511,9 @@ async def jam_server():
if data_from_jam_server['type'] == 'run': if data_from_jam_server['type'] == 'run':
alarm_status = (data_from_jam_server['data'])['state'] alarm_status = (data_from_jam_server['data'])['state']
print(alarm_status) print(alarm_status)
if alarm_status: if alarm_status and not jammer_event:
await jammer_active() await jammer_active()
else: elif not alarm_status and jammer_event:
await jammer_deactive() await jammer_deactive()
except Exception as e: except Exception as e:
jam_server_connect = None jam_server_connect = None
@ -511,10 +529,9 @@ async def startup_event():
Запускаем параллельно задачи jam_server и sending_data. Запускаем параллельно задачи jam_server и sending_data.
""" """
global sending_data_task
set_jammer_active(False) set_jammer_active(False)
asyncio.create_task(jam_server()) asyncio.create_task(jam_server())
sending_data_task = asyncio.create_task(sending_data()) ensure_sending_data_task()
if __name__ == '__main__': if __name__ == '__main__':

File diff suppressed because one or more lines are too long
Loading…
Cancel
Save