Skip to content

Commit 6a3bd79

Browse files
authored
Add support for multichannel waveforms (#4)
So far, we've only supported showing a mixed-down track. This PR adds support for showing multichannel waveforms one below the other (similar to Audacity).
1 parent d8135f1 commit 6a3bd79

7 files changed

Lines changed: 83 additions & 40 deletions

File tree

Sources/AudioSnapshotTesting/AVAudioPCMBufferExtensions.swift

Lines changed: 34 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -7,35 +7,46 @@ extension AVAudioPCMBuffer {
77
/// this function reduces this by choosing absolute max sample value
88
/// in a bucket of samples.
99
/// Returns a pair of buckets and max bucket value
10-
func reduce(bucketCount: Int) -> ([Float], Float) {
10+
func reduce(bucketCount: Int, mono: Bool = true) -> [DownsampledChannelData] {
1111
let frameCount = Int(self.frameLength)
12-
guard frameCount > 0 else { return ([], 0) }
13-
let mono = mixToMono()
14-
guard let mono = mono.floatChannelData else {
12+
guard frameCount > 0 else { return [DownsampledChannelData(channel: 0, samples: [], max: 0)] }
13+
let buffer = mono ? mixToMono() : self
14+
guard let data = buffer.floatChannelData else {
1515
fatalError("Not a float audio format")
1616
}
17-
let samples = Array(UnsafeBufferPointer(start: mono[0], count: frameCount))
18-
let samplesPerBucket = max(1, Double(frameCount) / Double(bucketCount))
19-
20-
var buckets = [Float](repeating: 0, count: bucketCount)
21-
var maxBucket: Float = 0
22-
for i in 0..<bucketCount {
23-
let bucketStart = Int(Double(i) * samplesPerBucket)
24-
let bucketEnd = min(bucketStart + Int(samplesPerBucket), frameCount)
25-
guard bucketStart < bucketEnd else { break }
26-
let bucketSamples = samples[bucketStart..<bucketEnd]
27-
let avgSample = bucketSamples.reduce(into: Float(0)) { currentMax, value in
28-
if abs(value) > abs(currentMax) {
29-
currentMax = value
30-
}
31-
}
32-
buckets[i] = avgSample
33-
if abs(avgSample) > maxBucket {
34-
maxBucket = abs(avgSample)
17+
return (0..<Int(buffer.format.channelCount)).map {
18+
process(channel: $0, data: data[$0], frameCount: frameCount, bucketCount: bucketCount)
19+
}
20+
}
21+
}
22+
23+
private func process(
24+
channel: Int,
25+
data: UnsafeMutablePointer<Float>,
26+
frameCount: Int,
27+
bucketCount: Int
28+
) -> DownsampledChannelData {
29+
let samples = Array(UnsafeBufferPointer(start: data, count: frameCount))
30+
let samplesPerBucket = max(1, Double(frameCount) / Double(bucketCount))
31+
32+
var buckets = [Float](repeating: 0, count: bucketCount)
33+
var maxBucket: Float = 0
34+
for i in 0..<bucketCount {
35+
let bucketStart = Int(Double(i) * samplesPerBucket)
36+
let bucketEnd = min(bucketStart + Int(samplesPerBucket), frameCount)
37+
guard bucketStart < bucketEnd else { break }
38+
let bucketSamples = samples[bucketStart..<bucketEnd]
39+
let avgSample = bucketSamples.reduce(into: Float(0)) { currentMax, value in
40+
if abs(value) > abs(currentMax) {
41+
currentMax = value
3542
}
3643
}
37-
return (buckets, maxBucket)
44+
buckets[i] = avgSample
45+
if abs(avgSample) > maxBucket {
46+
maxBucket = abs(avgSample)
47+
}
3848
}
49+
return DownsampledChannelData(channel: channel, samples: buckets, max: maxBucket)
3950
}
4051

4152
private extension AVAudioPCMBuffer {

Sources/AudioSnapshotTesting/AudioSnapshotTesting.swift

Lines changed: 10 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -21,25 +21,20 @@ public extension Snapshotting where Format == PlatformImage, Value == (AVAudioPC
2121
/// - width: The width of the resulting image.
2222
/// - height: The height of the resulting image.
2323
/// - strategy: The strategy to use when generating the waveform. Defaults to `.joinedLines`.
24-
static func waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines) -> Snapshotting {
24+
/// - mono: A boolean indicating whether to mix down to a mono signal before generating the waveform. Defaults to `true`.
25+
static func waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines, mono: Bool = true) -> Snapshotting {
2526
Snapshotting<PlatformView, PlatformImage>.image(size: .init(width: width, height: height))
2627
.pullback { buffer1, buffer2 in
27-
let (buckets1, max1) = buffer1.reduce(bucketCount: width)
28-
let (buckets2, max2) = buffer2.reduce(bucketCount: width)
29-
let data1 = buckets1.enumerated().map(Bucket.init)
30-
let data2 = buckets2.enumerated().map(Bucket.init)
3128
let verticalPadding: CGFloat = 4
3229
let waveformHeight = CGFloat(height) - (verticalPadding * 2)
33-
let waveform1 = WaveformView(
34-
buckets: data1,
35-
absMax: max1,
30+
let waveform1 = MultiChannelWaveformView(
31+
data: buffer1.reduce(bucketCount: width, mono: mono),
3632
height: waveformHeight,
3733
color: .red,
3834
strategy: strategy
3935
)
40-
let waveform2 = WaveformView(
41-
buckets: data2,
42-
absMax: max2,
36+
let waveform2 = MultiChannelWaveformView(
37+
data: buffer2.reduce(bucketCount: width, mono: mono),
4338
height: waveformHeight,
4439
color: .green,
4540
strategy: strategy
@@ -62,16 +57,14 @@ public extension Snapshotting where Format == PlatformImage, Value == AVAudioPCM
6257
/// - width: The width of the resulting image.
6358
/// - height: The height of the resulting image.
6459
/// - strategy: The strategy to use when generating the waveform. Defaults to `.joinedLines`.
65-
static func waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines) -> Snapshotting {
60+
/// - mono: A boolean indicating whether to mix down to a mono signal before generating the waveform. Defaults to `true`.
61+
static func waveform(width: Int, height: Int, strategy: WaveformStrategy = .joinedLines, mono: Bool = true) -> Snapshotting {
6662
Snapshotting<PlatformView, PlatformImage>.image(size: .init(width: width, height: height))
6763
.pullback { buffer in
6864
let verticalPadding: CGFloat = 4
6965
let waveformHeight = CGFloat(height) - (verticalPadding * 2)
70-
let (buckets, max) = buffer.reduce(bucketCount: width)
71-
let data = buckets.enumerated().map(Bucket.init)
72-
let waveform = WaveformView(
73-
buckets: data,
74-
absMax: max,
66+
let waveform = MultiChannelWaveformView(
67+
data: buffer.reduce(bucketCount: width, mono: mono),
7568
height: waveformHeight,
7669
color: .red,
7770
strategy: strategy
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
struct DownsampledChannelData: Identifiable {
2+
let channel: Int
3+
let samples: [Float]
4+
let max: Float
5+
6+
var id: Int { channel }
7+
}
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
import SwiftUI
2+
3+
struct MultiChannelWaveformView: View {
4+
let data: [DownsampledChannelData]
5+
let height: CGFloat
6+
let color: Color
7+
var strategy: WaveformStrategy = .joinedLines
8+
9+
var body: some View {
10+
VStack(spacing: 4) {
11+
let waveformHeight = height / CGFloat(data.count)
12+
ForEach(data) {
13+
WaveformView(
14+
buckets: $0.samples.enumerated().map(Bucket.init),
15+
absMax: $0.max,
16+
height: waveformHeight,
17+
color: color,
18+
strategy: strategy
19+
)
20+
}
21+
}
22+
}
23+
}

Tests/AudioSnapshotTestingTests/AudioSnapshotTestingTests.swift

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,15 @@ func fileWaveformMetronome() async throws {
2727
)
2828
}
2929

30+
@Test(.snapshots(record: false, diffTool: .ksdiff))
31+
@MainActor
32+
func stereoFileWaveform() async throws {
33+
assertSnapshot(
34+
of: try AVAudioPCMBuffer.read(wave: "left-right-metronome"),
35+
as: .waveform(width: 4000, height: 1000, strategy: .individualLines, mono: false)
36+
)
37+
}
38+
3039
@Test(.snapshots(record: false, diffTool: .ksdiff))
3140
@MainActor
3241
func fileWaveformOverlay() async throws {
Binary file not shown.
104 KB
Loading

0 commit comments

Comments
 (0)