|
1 | 1 | # ruff: noqa: PLW0603 |
2 | 2 | """Contains functions to use the BirdNET models.""" |
3 | 3 |
|
| 4 | +import logging |
4 | 5 | import os |
5 | 6 | import sys |
6 | 7 | import warnings |
7 | 8 |
|
| 9 | +import absl.logging |
8 | 10 | import numpy as np |
9 | 11 |
|
10 | 12 | import birdnet_analyzer.config as cfg |
11 | 13 | from birdnet_analyzer import utils |
12 | 14 |
|
13 | | -SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) |
14 | | - |
15 | | - |
| 15 | +absl.logging.set_verbosity(absl.logging.ERROR) |
| 16 | +logging.getLogger("tensorflow").setLevel(logging.ERROR) |
16 | 17 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" |
17 | 18 | os.environ["CUDA_VISIBLE_DEVICES"] = "" |
18 | | - |
19 | 19 | warnings.filterwarnings("ignore") |
20 | 20 |
|
21 | 21 | # Import TFLite from runtime or Tensorflow; |
|
29 | 29 | if not cfg.MODEL_PATH.endswith(".tflite"): |
30 | 30 | from tensorflow import keras |
31 | 31 |
|
| 32 | +SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) |
32 | 33 | INTERPRETER: tflite.Interpreter = None |
33 | 34 | C_INTERPRETER: tflite.Interpreter = None |
34 | 35 | M_INTERPRETER: tflite.Interpreter = None |
|
38 | 39 | EMPTY_CLASS_EXCEPTION_REF = None |
39 | 40 |
|
40 | 41 |
|
def _load_interpreter(mpath, threads):
    """Create a TFLite interpreter for the model at *mpath* using *threads* threads.

    The default-delegate resolver is used so XNNPACK stays disabled: XNNPACK
    does not support a variable input size (i.e. batch size) anyway.
    """
    resolver_type = tflite.experimental.OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES
    interpreter = tflite.Interpreter(
        model_path=mpath,
        num_threads=threads,
        experimental_op_resolver_type=resolver_type,
    )
    return interpreter
| 49 | + |
| 50 | + |
41 | 51 | def get_empty_class_exception(): |
42 | 52 | import keras_tuner.errors |
43 | 53 |
|
@@ -361,11 +371,7 @@ def upsampling(x: np.ndarray, y: np.ndarray, ratio=0.5, mode="repeat"): |
361 | 371 | rng = np.random.default_rng(cfg.RANDOM_SEED) |
362 | 372 |
|
363 | 373 | # Determine min number of samples |
364 | | - min_samples = ( |
365 | | - int(max(y.sum(axis=0), len(y) - y.sum(axis=0)) * ratio) |
366 | | - if cfg.BINARY_CLASSIFICATION |
367 | | - else int(np.max(y.sum(axis=0)) * ratio) |
368 | | - ) |
| 374 | + min_samples = int(max(y.sum(axis=0), len(y) - y.sum(axis=0)) * ratio) if cfg.BINARY_CLASSIFICATION else int(np.max(y.sum(axis=0)) * ratio) |
369 | 375 |
|
370 | 376 | x_temp = [] |
371 | 377 | y_temp = [] |
@@ -516,9 +522,7 @@ def load_model(class_output=True): |
516 | 522 | if cfg.MODEL_PATH.endswith(".tflite"): |
517 | 523 | if not INTERPRETER: |
518 | 524 | # Load TFLite model and allocate tensors. |
519 | | - INTERPRETER = tflite.Interpreter( |
520 | | - model_path=os.path.join(SCRIPT_DIR, cfg.MODEL_PATH), num_threads=cfg.TFLITE_THREADS |
521 | | - ) |
| 525 | + INTERPRETER = _load_interpreter(os.path.join(SCRIPT_DIR, cfg.MODEL_PATH), cfg.TFLITE_THREADS) |
522 | 526 | INTERPRETER.allocate_tensors() |
523 | 527 |
|
524 | 528 | # Get input and output tensors. |
@@ -553,7 +557,7 @@ def load_custom_classifier(): |
553 | 557 |
|
554 | 558 | if cfg.CUSTOM_CLASSIFIER.endswith(".tflite"): |
555 | 559 | # Load TFLite model and allocate tensors. |
556 | | - C_INTERPRETER = tflite.Interpreter(model_path=cfg.CUSTOM_CLASSIFIER, num_threads=cfg.TFLITE_THREADS) |
| 560 | + C_INTERPRETER = _load_interpreter(cfg.CUSTOM_CLASSIFIER, cfg.TFLITE_THREADS) |
557 | 561 | C_INTERPRETER.allocate_tensors() |
558 | 562 |
|
559 | 563 | # Get input and output tensors. |
@@ -585,9 +589,7 @@ def load_meta_model(): |
585 | 589 | global M_OUTPUT_LAYER_INDEX |
586 | 590 |
|
587 | 591 | # Load TFLite model and allocate tensors. |
588 | | - M_INTERPRETER = tflite.Interpreter( |
589 | | - model_path=os.path.join(SCRIPT_DIR, cfg.MDATA_MODEL_PATH), num_threads=cfg.TFLITE_THREADS |
590 | | - ) |
| 592 | + M_INTERPRETER = _load_interpreter(os.path.join(SCRIPT_DIR, cfg.MDATA_MODEL_PATH), cfg.TFLITE_THREADS) |
591 | 593 | M_INTERPRETER.allocate_tensors() |
592 | 594 |
|
593 | 595 | # Get input and output tensors. |
@@ -633,11 +635,7 @@ def build_linear_classifier(num_labels, input_size, hidden_units=0, dropout=0.0) |
633 | 635 | model.add(keras.layers.Dropout(dropout)) |
634 | 636 |
|
635 | 637 | # Add a hidden layer with L2 regularization |
636 | | - model.add( |
637 | | - keras.layers.Dense( |
638 | | - hidden_units, activation="relu", kernel_regularizer=regularizer, kernel_initializer="he_normal" |
639 | | - ) |
640 | | - ) |
| 638 | + model.add(keras.layers.Dense(hidden_units, activation="relu", kernel_regularizer=regularizer, kernel_initializer="he_normal")) |
641 | 639 |
|
642 | 640 | # Add another batch normalization after the hidden layer |
643 | 641 | model.add(keras.layers.BatchNormalization()) |
@@ -813,9 +811,7 @@ def _focal_loss(y_true, y_pred): |
813 | 811 | ) |
814 | 812 |
|
815 | 813 | # Train model |
816 | | - history = classifier.fit( |
817 | | - x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_val, y_val), callbacks=callbacks |
818 | | - ) |
| 814 | + history = classifier.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_val, y_val), callbacks=callbacks) |
819 | 815 |
|
820 | 816 | return classifier, history |
821 | 817 |
|
|
0 commit comments