File size: 5,074 Bytes
f7fb447
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab70807
 
 
f7fb447
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab70807
 
 
f7fb447
ab70807
 
 
 
 
 
 
f7fb447
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
"""Core processing for AIRA module."""
import numpy as np
from dataclasses import dataclass, field
from plotly import graph_objects as go

from aira.engine.input import InputProcessorChain, InputMode
from aira.engine.intensity import (
    convert_bformat_to_intensity,
    analysis_crop_2d,
    integrate_intensity_directions,
    intensity_thresholding,
)
from aira.engine.pressure import w_channel_preprocess
from aira.engine.plot import hedgehog, w_channel, setup_plotly_layout, get_xy_projection
from aira.engine.reflections import detect_reflections
from aira.utils import read_signals_dict, cartesian_to_spherical


@dataclass
class AmbisonicsImpulseResponseAnalyzer:
    """Main class for analyzing Ambisonics impulse responses."""

    # Processor chain that turns the raw input files into B-format signals.
    # A per-instance field (default_factory) rather than a class-level shared
    # instance, so separate analyzers never share mutable processor state.
    input_builder: InputProcessorChain = field(default_factory=InputProcessorChain)

    def analyze(
        self,
        input_dict: dict,
        integration_time: float,
        intensity_threshold: float,
        analysis_length: float,
        show: bool = False,
    ) -> go.Figure:
        """Analyzes a set of measurements in Ambisonics format and plots a hedgehog
        with the estimated reflections direction.

        Parameters
        ----------
        input_dict : dict
            Dictionary with all the data needed to analyze a set of measurements
            (paths of the measurements, input mode, channels per file, etc.)
        integration_time : float
            Time frame (in seconds) over which intensity vectors are integrated
            by taking their mean.
        intensity_threshold : float
            Bottom limit for intensity values in dB; reflections below it are
            discarded.
        analysis_length : float
            Total time of analysis (in seconds) from the intensity max peak.
        show : bool, optional
            Shows plotly figure in browser, by default False

        Returns
        -------
        go.Figure
            Plotly figure with hedgehog and w-channel plot
        """
        # The signals are read here only to recover the sample rate; the
        # B-format conversion itself is done by the input processor chain.
        signals_dict = read_signals_dict(input_dict)
        sample_rate = signals_dict["sample_rate"]

        bformat_signals = self.input_builder.process(input_dict)

        # Instantaneous intensity vectors derived from the B-format channels.
        intensity_directions = convert_bformat_to_intensity(bformat_signals)

        # Keep only `analysis_length` seconds of the intensity signal.
        intensity_directions_cropped = analysis_crop_2d(
            analysis_length, sample_rate, intensity_directions
        )

        # Windowed integration yields one mean vector per time frame.
        intensity_windowed, time = integrate_intensity_directions(
            intensity_directions_cropped, integration_time, sample_rate
        )

        intensity, azimuth, elevation = cartesian_to_spherical(intensity_windowed)

        (
            intensity_peaks,
            azimuth_peaks,
            elevation_peaks,
            reflections_idx,
        ) = detect_reflections(intensity, azimuth, elevation)

        # Drop detected reflections whose level falls below the dB threshold.
        (
            reflex_to_direct,
            azimuth_peaks,
            elevation_peaks,
            reflections_idx,
        ) = intensity_thresholding(
            intensity_threshold,
            intensity_peaks,
            azimuth_peaks,
            elevation_peaks,
            reflections_idx,
        )

        # Keep only the time stamps of the surviving reflections.
        time = time[reflections_idx]

        fig = setup_plotly_layout()

        hedgehog(fig, time, reflex_to_direct, azimuth_peaks, elevation_peaks)

        # W (omnidirectional) channel is the first row of the B-format array.
        w_channel_signal = w_channel_preprocess(
            bformat_signals[0, :],
            int(integration_time * sample_rate),
            analysis_length,
            sample_rate,
        )

        # Time axis converted from seconds to milliseconds for the plot.
        w_channel(
            fig,
            np.arange(0, analysis_length, 1 / sample_rate) * 1000,
            w_channel_signal,
            intensity_threshold,
            time,
        )

        if show:
            fig.show()
        return fig

    def export_xy_projection(self, fig: go.Figure, img_name: str) -> None:
        """Write the XY-plane projection of `fig` to `img_name` as a PNG image."""
        new_fig = get_xy_projection(fig)
        new_fig.write_image(img_name, format="png")

if __name__ == "__main__":
    # Example analysis of the Regio theater mock measurements (four A-format
    # capsule recordings plus the inverse filter of the sweep).
    measurement_config = {
        "front_left_up": "test/mock_data/regio_theater/soundfield_flu.wav",
        "front_right_down": "test/mock_data/regio_theater/soundfield_frd.wav",
        "back_right_up": "test/mock_data/regio_theater/soundfield_bru.wav",
        "back_left_down": "test/mock_data/regio_theater/soundfield_bld.wav",
        "inverse_filter": "test/mock_data/regio_theater/soundfield_inverse_filter.wav",
        "input_mode": InputMode.LSS,
        "channels_per_file": 1,
        "frequency_correction": True,
    }

    # Alternative dataset: York auditorium, B-format in one 4-channel file.
    # measurement_config = {
    #     "stacked_signals": "test/mock_data/york_auditorium/s2r2.wav",
    #     "input_mode": InputMode.BFORMAT,
    #     "channels_per_file": 4,
    #     "frequency_correction": False,
    # }

    analyzer = AmbisonicsImpulseResponseAnalyzer()
    figure = analyzer.analyze(
        input_dict=measurement_config,
        integration_time=0.01,
        intensity_threshold=-50,
        analysis_length=0.3,
        show=True,
    )
    analyzer.export_xy_projection(figure, "projection.png")