diff --git a/CHANGELOG.md b/CHANGELOG.md
index d6df59e6e9..d964ae03c1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -151,6 +151,7 @@ Breaking changes in this release:
- 👷🏻 Added `npm run build-browser` script for building test harness package only, in PR [#5667](https://github.com/microsoft/BotFramework-WebChat/pull/5667), by [@compulim](https://github.com/compulim)
- Added pull-based capabilities system for dynamically discovering adapter capabilities at runtime, in PR [#5679](https://github.com/microsoft/BotFramework-WebChat/pull/5679), by [@pranavjoshi001](https://github.com/pranavjoshi001)
- Added Speech-to-Speech (S2S) support for real-time voice conversations, in PR [#5654](https://github.com/microsoft/BotFramework-WebChat/pull/5654), by [@pranavjoshi](https://github.com/pranavjoshi001)
+- Added core mute/unmute functionality for speech-to-speech via `useRecorder` hook (silent chunks keep server connection alive), in PR [#5688](https://github.com/microsoft/BotFramework-WebChat/pull/5688), by [@pranavjoshi001](https://github.com/pranavjoshi001)
### Changed
diff --git a/__tests__/assets/esm/speechToSpeech/mockMediaDevices.js b/__tests__/assets/esm/speechToSpeech/mockMediaDevices.js
index fb69332445..cabe283cda 100644
--- a/__tests__/assets/esm/speechToSpeech/mockMediaDevices.js
+++ b/__tests__/assets/esm/speechToSpeech/mockMediaDevices.js
@@ -32,6 +32,7 @@ export function setupMockMediaDevices() {
const node = context.createGain();
const channel = new MessageChannel();
let recording = false;
+ let muted = false;
let intervalId = null;
node.port = channel.port1;
@@ -42,13 +43,21 @@ export function setupMockMediaDevices() {
channel.port2.onmessage = ({ data }) => {
if (data.command === 'START') {
recording = true;
+ muted = false;
const bufferSize = options?.processorOptions?.bufferSize || 2400;
// Send chunks at ~100ms intervals while recording
// Use port2.postMessage so port1.onmessage (set by real code) receives it
intervalId = setInterval(() => {
if (recording) {
- channel.port2.postMessage({ eventType: 'audio', audioData: new Float32Array(bufferSize) });
+ // Float32Array defaults to zeros (silent), fill with sine wave when not muted
+ const audioData = new Float32Array(bufferSize);
+ if (!muted) {
+ for (let i = 0; i < bufferSize; i++) {
+ audioData[+i] = Math.sin(i * 0.1) * 0.5;
+ }
+ }
+ channel.port2.postMessage({ eventType: 'audio', audioData });
}
}, 100);
} else if (data.command === 'STOP') {
@@ -57,6 +66,10 @@ export function setupMockMediaDevices() {
clearInterval(intervalId);
intervalId = null;
}
+ } else if (data.command === 'MUTE') {
+ muted = true;
+ } else if (data.command === 'UNMUTE') {
+ muted = false;
}
};
diff --git a/__tests__/html2/speechToSpeech/mute.unmute.html b/__tests__/html2/speechToSpeech/mute.unmute.html
new file mode 100644
index 0000000000..396060f495
--- /dev/null
+++ b/__tests__/html2/speechToSpeech/mute.unmute.html
@@ -0,0 +1,214 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packages/api/src/boot/hook.ts b/packages/api/src/boot/hook.ts
index dda4464d10..a4e8108168 100644
--- a/packages/api/src/boot/hook.ts
+++ b/packages/api/src/boot/hook.ts
@@ -76,6 +76,7 @@ export {
useUIState,
useUserID,
useUsername,
+ useVoiceRecordingMuted,
useVoiceSelector,
useVoiceState
} from '../hooks/index';
diff --git a/packages/api/src/hooks/index.ts b/packages/api/src/hooks/index.ts
index f1fb41c3ce..f206ca7ec3 100644
--- a/packages/api/src/hooks/index.ts
+++ b/packages/api/src/hooks/index.ts
@@ -73,6 +73,7 @@ import useTrackTiming from './useTrackTiming';
import useUIState from './useUIState';
import useUserID from './useUserID';
import useUsername from './useUsername';
+import useVoiceRecordingMuted from './useVoiceRecordingMuted';
import useVoiceSelector from './useVoiceSelector';
import useVoiceState from './useVoiceState';
@@ -155,6 +156,7 @@ export {
useUIState,
useUserID,
useUsername,
+ useVoiceRecordingMuted,
useVoiceSelector,
useVoiceState
};
diff --git a/packages/api/src/hooks/useVoiceRecordingMuted.ts b/packages/api/src/hooks/useVoiceRecordingMuted.ts
new file mode 100644
index 0000000000..a562a84204
--- /dev/null
+++ b/packages/api/src/hooks/useVoiceRecordingMuted.ts
@@ -0,0 +1,20 @@
+import { muteVoiceRecording, unmuteVoiceRecording } from 'botframework-webchat-core';
+import { useCallback } from 'react';
+import { useDispatch, useSelector } from './internal/WebChatReduxContext';
+
+/**
+ * Hook to get and set the voice recording mute state in speech-to-speech mode; returns a frozen `[muted, setMuted]` pair.
+ */
+export default function useVoiceRecordingMuted(): readonly [boolean, (muted: boolean) => void] {
+ const dispatch = useDispatch();
+ const value = useSelector(({ voice }) => voice.voiceState === 'muted');
+
+ const setter = useCallback(
+ (muted: boolean) => {
+ dispatch(muted ? muteVoiceRecording() : unmuteVoiceRecording());
+ },
+ [dispatch]
+ );
+
+ return Object.freeze([value, setter]);
+}
diff --git a/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx b/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx
index ea00dbe81d..bd2e351ab7 100644
--- a/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx
+++ b/packages/api/src/providers/SpeechToSpeech/private/VoiceRecorderBridge.tsx
@@ -11,6 +11,7 @@ export function VoiceRecorderBridge(): null {
const [voiceState] = useVoiceState();
const postVoiceActivity = usePostVoiceActivity();
+ const muted = voiceState === 'muted';
// Derive recording state from voiceState - recording is active when not idle
const recording = voiceState !== 'idle';
@@ -29,7 +30,13 @@ export function VoiceRecorderBridge(): null {
[postVoiceActivity]
);
- const { record } = useRecorder(handleAudioChunk);
+ const { mute, record } = useRecorder(handleAudioChunk);
+
+ useEffect(() => {
+ if (muted) {
+ return mute();
+ }
+ }, [muted, mute]);
useEffect(() => {
if (recording) {
diff --git a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx
index 6bb47cfa14..c11ef4eb5a 100644
--- a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx
+++ b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.spec.tsx
@@ -36,13 +36,16 @@ const mockWorkletNode = {
port: mockWorkletPort
};
+const mockSourceNode = {
+ connect: jest.fn(),
+ disconnect: jest.fn()
+};
+
const mockAudioContext = {
audioWorklet: {
addModule: jest.fn().mockResolvedValue(undefined)
},
- createMediaStreamSource: jest.fn(() => ({
- connect: jest.fn()
- })),
+ createMediaStreamSource: jest.fn(() => mockSourceNode),
destination: {},
resume: jest.fn().mockResolvedValue(undefined),
state: 'running'
@@ -218,4 +221,124 @@ describe('useRecorder', () => {
});
});
});
+
+ test('should return mute function', () => {
+ render();
+ expect(typeof hookData?.mute).toBe('function');
+ });
+
+ test('should send MUTE command and stop media stream when mute is called', async () => {
+ render();
+
+ // Start recording first
+ act(() => {
+ hookData?.record();
+ });
+
+ await waitFor(() => {
+ expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'START' });
+ });
+ await waitFor(() => expect(mockWorkletPort.onmessage).not.toBeNull());
+
+ // Clear mocks to isolate mute behavior
+ mockWorkletPort.postMessage.mockClear();
+ mockTrack.stop.mockClear();
+ mockSourceNode.disconnect.mockClear();
+
+ // Call mute
+ act(() => {
+ hookData?.mute();
+ });
+
+ // Should send MUTE command to worklet
+ expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'MUTE' });
+ // Should stop media stream tracks (mic indicator OFF)
+ expect(mockTrack.stop).toHaveBeenCalledTimes(1);
+ // Should disconnect source node
+ expect(mockSourceNode.disconnect).toHaveBeenCalledTimes(1);
+
+ // Clear to track only muted audio
+ onAudioChunk.mockClear();
+ (global.btoa as jest.Mock).mockClear();
+
+ // Simulate audio worklet sending silent frame (all zeros) while muted
+ const silentAudioData = new Float32Array(128).fill(0);
+ act(() => {
+ mockWorkletPort.onmessage!({
+ data: {
+ eventType: 'audio',
+ audioData: silentAudioData
+ }
+ });
+ });
+
+ await waitFor(() => expect(onAudioChunk).toHaveBeenCalledTimes(1));
+
+ // Verify btoa was called with data representing zeros
+ expect(global.btoa).toHaveBeenCalled();
+ const [[btoaCall]] = (global.btoa as jest.Mock).mock.calls;
+ // All characters should be null characters (representing 16-bit zeros)
+ const allZeros = [...btoaCall].every((char: string) => char.charCodeAt(0) === 0);
+ expect(allZeros).toBe(true);
+ });
+
+ test('should return unmute function from mute() that sends UNMUTE and restarts media stream', async () => {
+ render();
+
+ // Start recording first
+ act(() => {
+ hookData?.record();
+ });
+
+ await waitFor(() => {
+ expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'START' });
+ });
+ await waitFor(() => expect(mockWorkletPort.onmessage).not.toBeNull());
+
+ // Call mute and get unmute function
+ let unmute: (() => void) | undefined;
+ act(() => {
+ unmute = hookData?.mute();
+ });
+
+ // Clear mocks to isolate unmute behavior
+ mockWorkletPort.postMessage.mockClear();
+ mockMediaDevices.getUserMedia.mockClear();
+
+ // Call unmute
+ act(() => {
+ unmute?.();
+ });
+
+ // Should send UNMUTE command to worklet
+ expect(mockWorkletPort.postMessage).toHaveBeenCalledWith({ command: 'UNMUTE' });
+ // Should restart media stream
+ await waitFor(() => {
+ expect(mockMediaDevices.getUserMedia).toHaveBeenCalledTimes(1);
+ });
+
+ // Clear mocks to track audio after unmute
+ onAudioChunk.mockClear();
+ (global.btoa as jest.Mock).mockClear();
+
+ // Simulate real audio data after unmute (non-zero values)
+ const realAudioData = new Float32Array([0.7, -0.5, 0.9, -0.2, 0.4]);
+ act(() => {
+ mockWorkletPort.onmessage!({
+ data: {
+ eventType: 'audio',
+ audioData: realAudioData
+ }
+ });
+ });
+
+ await waitFor(() => expect(onAudioChunk).toHaveBeenCalledTimes(1));
+
+ // Verify real audio (non-zero) was processed
+ expect(global.btoa).toHaveBeenCalled();
+ const [[btoaCall]] = (global.btoa as jest.Mock).mock.calls;
+ // Real audio should have non-zero bytes
+ const hasNonZero = [...btoaCall].some((char: string) => char.charCodeAt(0) !== 0);
+ expect(hasNonZero).toBe(true);
+ });
});
diff --git a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts
index 05ed029003..5b709fda9f 100644
--- a/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts
+++ b/packages/api/src/providers/SpeechToSpeech/private/useRecorder.ts
@@ -8,6 +8,7 @@ declare class AudioWorkletProcessor {
buffer: number[];
bufferSize: number;
constructor(options?: AudioWorkletNodeOptions);
+ muted: boolean;
process(inputs: Float32Array[][], outputs: Float32Array[][], parameters: Record): boolean;
readonly port: MessagePort;
recording: boolean;
@@ -20,11 +21,15 @@ declare function registerProcessor(name: string, processorCtor: typeof AudioWork
* without any TypeScript annotations that could be transformed by the compiler.
*/
const audioProcessorCode = `(${function () {
+ const RENDER_QUANTUM = 128;
+ const SILENT_FRAME = new Float32Array(RENDER_QUANTUM);
+
class AudioRecorderProcessor extends AudioWorkletProcessor {
constructor(options: AudioWorkletNodeOptions) {
super();
this.buffer = [];
this.bufferSize = options.processorOptions.bufferSize;
+ this.muted = false;
this.recording = false;
this.port.onmessage = e => {
@@ -33,13 +38,21 @@ const audioProcessorCode = `(${function () {
} else if (e.data.command === 'STOP') {
this.recording = false;
this.buffer = [];
+ } else if (e.data.command === 'MUTE') {
+ this.muted = true;
+ } else if (e.data.command === 'UNMUTE') {
+ this.muted = false;
}
};
}
process(inputs: Float32Array[][]) {
- if (inputs[0] && inputs[0].length && this.recording) {
- this.buffer.push(...inputs[0][0]);
+ if (this.recording) {
+ // Use real audio when not muted, otherwise silent frame to keep connection alive.
+ const inputAudio = inputs[0]?.[0];
+ const audioData = this.muted || !inputAudio ? SILENT_FRAME : inputAudio;
+ this.buffer.push(...audioData);
+
while (this.buffer.length >= this.bufferSize) {
const chunk = this.buffer.splice(0, this.bufferSize);
this.port.postMessage({ eventType: 'audio', audioData: new Float32Array(chunk) });
@@ -62,6 +75,7 @@ const MS_IN_SECOND = 1000;
export function useRecorder(onAudioChunk: (base64: string, timestamp: string) => void) {
const [{ Date }] = usePonyfill();
const audioCtxRef = useRef(undefined);
+ const sourceRef = useRef(undefined);
const streamRef = useRef(undefined);
const voiceConfiguration = useCapabilities(caps => caps.voiceConfiguration);
const workletRef = useRef(undefined);
@@ -69,17 +83,44 @@ export function useRecorder(onAudioChunk: (base64: string, timestamp: string) =>
const chunkIntervalMs = voiceConfiguration?.chunkIntervalMs ?? DEFAULT_CHUNK_SIZE_IN_MS;
const sampleRate = voiceConfiguration?.sampleRate ?? DEFAULT_SAMPLE_RATE;
+ const stopMediaStream = useCallback(() => {
+ if (streamRef.current) {
+ streamRef.current.getTracks().forEach(track => track.stop());
+ streamRef.current = undefined;
+ }
+ }, [streamRef]);
+
+ // Acquire MediaStream and connect source to worklet
+ const acquireAndConnectMediaStream = useCallback(async () => {
+ const audioCtx = audioCtxRef.current;
+ if (!audioCtx) {
+ return;
+ }
+
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: {
+ channelCount: 1,
+ echoCancellation: true,
+ sampleRate
+ }
+ });
+ streamRef.current = stream;
+
+ const source = audioCtx.createMediaStreamSource(stream);
+ if (workletRef.current) {
+ source.connect(workletRef.current);
+ }
+ sourceRef.current = source;
+ }, [audioCtxRef, sampleRate, sourceRef, streamRef, workletRef]);
+
const stopRecording = useCallback(() => {
if (workletRef.current) {
workletRef.current.port.postMessage({ command: 'STOP' });
workletRef.current.disconnect();
workletRef.current = undefined;
}
- if (streamRef.current) {
- streamRef.current.getTracks().forEach(track => track.stop());
- streamRef.current = undefined;
- }
- }, [streamRef, workletRef]);
+ stopMediaStream();
+ }, [stopMediaStream, workletRef]);
const initAudio = useCallback(async () => {
if (audioCtxRef.current) {
@@ -103,15 +144,7 @@ export function useRecorder(onAudioChunk: (base64: string, timestamp: string) =>
if (audioCtx.state === 'suspended') {
await audioCtx.resume();
}
- const stream = await navigator.mediaDevices.getUserMedia({
- audio: {
- channelCount: 1,
- echoCancellation: true,
- sampleRate
- }
- });
- streamRef.current = stream;
- const source = audioCtx.createMediaStreamSource(stream);
+
const worklet = new AudioWorkletNode(audioCtx, 'audio-recorder', {
processorOptions: {
bufferSize: (sampleRate * chunkIntervalMs) / MS_IN_SECOND
@@ -131,16 +164,57 @@ export function useRecorder(onAudioChunk: (base64: string, timestamp: string) =>
}
};
- source.connect(worklet);
worklet.connect(audioCtx.destination);
- worklet.port.postMessage({ command: 'START' });
workletRef.current = worklet;
- }, [audioCtxRef, chunkIntervalMs, Date, initAudio, onAudioChunk, sampleRate]);
+
+ await acquireAndConnectMediaStream();
+
+ worklet.port.postMessage({ command: 'START' });
+ }, [
+ acquireAndConnectMediaStream,
+ audioCtxRef,
+ chunkIntervalMs,
+ Date,
+ initAudio,
+ onAudioChunk,
+ sampleRate,
+ workletRef
+ ]);
+
+ const muteRecording = useCallback(() => {
+ // Stop MediaStream (mic indicator OFF) and disconnect source
+ stopMediaStream();
+
+ if (sourceRef.current) {
+ sourceRef.current.disconnect();
+ sourceRef.current = undefined;
+ }
+
+ // Tell worklet to output silence
+ if (workletRef.current) {
+ workletRef.current.port.postMessage({ command: 'MUTE' });
+ }
+
+ // Return unmute function
+ return () => {
+ if (!audioCtxRef.current || !workletRef.current) {
+ return;
+ }
+
+ // Tell worklet to use real audio
+ workletRef.current.port.postMessage({ command: 'UNMUTE' });
+
+ // Restart MediaStream and reconnect source (fire and forget)
+ acquireAndConnectMediaStream();
+ };
+ }, [acquireAndConnectMediaStream, audioCtxRef, sourceRef, stopMediaStream, workletRef]);
const record = useCallback(() => {
startRecording();
return stopRecording;
}, [startRecording, stopRecording]);
- return useMemo(() => ({ record }), [record]);
+ const mute = useCallback(() => muteRecording(), [muteRecording]);
+
+ return useMemo(() => ({ mute, record }), [mute, record]);
}
diff --git a/packages/bundle/src/boot/actual/hook/minimal.ts b/packages/bundle/src/boot/actual/hook/minimal.ts
index b09c196177..5b7e344151 100644
--- a/packages/bundle/src/boot/actual/hook/minimal.ts
+++ b/packages/bundle/src/boot/actual/hook/minimal.ts
@@ -96,6 +96,7 @@ export {
useUIState,
useUserID,
useUsername,
+ useVoiceRecordingMuted,
useVoiceSelector,
useVoiceState,
useWebSpeechPonyfill,
diff --git a/packages/component/src/boot/hook.ts b/packages/component/src/boot/hook.ts
index 426948db69..38f37d1659 100644
--- a/packages/component/src/boot/hook.ts
+++ b/packages/component/src/boot/hook.ts
@@ -74,6 +74,7 @@ export {
useUIState,
useUserID,
useUsername,
+ useVoiceRecordingMuted,
useVoiceSelector,
useVoiceState
} from 'botframework-webchat-api/hook';
diff --git a/packages/core/src/actions/muteVoiceRecording.ts b/packages/core/src/actions/muteVoiceRecording.ts
new file mode 100644
index 0000000000..4a3f65c82e
--- /dev/null
+++ b/packages/core/src/actions/muteVoiceRecording.ts
@@ -0,0 +1,17 @@
+const VOICE_MUTE_RECORDING = 'WEB_CHAT/VOICE_MUTE_RECORDING' as const;
+
+type VoiceMuteRecordingAction = {
+ type: typeof VOICE_MUTE_RECORDING;
+};
+
+function muteVoiceRecording(): VoiceMuteRecordingAction {
+ return {
+ type: VOICE_MUTE_RECORDING
+ };
+}
+
+export default muteVoiceRecording;
+
+export { VOICE_MUTE_RECORDING };
+
+export type { VoiceMuteRecordingAction };
diff --git a/packages/core/src/actions/setVoiceState.ts b/packages/core/src/actions/setVoiceState.ts
index 53fc12b7c2..70feef25c3 100644
--- a/packages/core/src/actions/setVoiceState.ts
+++ b/packages/core/src/actions/setVoiceState.ts
@@ -1,6 +1,6 @@
const VOICE_SET_STATE = 'WEB_CHAT/VOICE_SET_STATE' as const;
-type VoiceState = 'idle' | 'listening' | 'user_speaking' | 'processing' | 'bot_speaking';
+type VoiceState = 'idle' | 'listening' | 'muted' | 'user_speaking' | 'processing' | 'bot_speaking';
type VoiceSetStateAction = {
type: typeof VOICE_SET_STATE;
diff --git a/packages/core/src/actions/unmuteVoiceRecording.ts b/packages/core/src/actions/unmuteVoiceRecording.ts
new file mode 100644
index 0000000000..c4cbc74a46
--- /dev/null
+++ b/packages/core/src/actions/unmuteVoiceRecording.ts
@@ -0,0 +1,17 @@
+const VOICE_UNMUTE_RECORDING = 'WEB_CHAT/VOICE_UNMUTE_RECORDING' as const;
+
+type VoiceUnmuteRecordingAction = {
+ type: typeof VOICE_UNMUTE_RECORDING;
+};
+
+function unmuteVoiceRecording(): VoiceUnmuteRecordingAction {
+ return {
+ type: VOICE_UNMUTE_RECORDING
+ };
+}
+
+export default unmuteVoiceRecording;
+
+export { VOICE_UNMUTE_RECORDING };
+
+export type { VoiceUnmuteRecordingAction };
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index 27fde76d63..c29a003704 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -3,7 +3,10 @@ import disconnect from './actions/disconnect';
import dismissNotification from './actions/dismissNotification';
import emitTypingIndicator from './actions/emitTypingIndicator';
import markActivity from './actions/markActivity';
+import muteVoiceRecording from './actions/muteVoiceRecording';
import postActivity from './actions/postActivity';
+import postVoiceActivity from './actions/postVoiceActivity';
+import registerVoiceHandler from './actions/registerVoiceHandler';
import sendEvent from './actions/sendEvent';
import sendFiles from './actions/sendFiles';
import sendMessage from './actions/sendMessage';
@@ -17,11 +20,16 @@ import setSendBox from './actions/setSendBox';
import setSendBoxAttachments from './actions/setSendBoxAttachments';
import setSendTimeout from './actions/setSendTimeout';
import setSendTypingIndicator from './actions/setSendTypingIndicator';
+import setVoiceState from './actions/setVoiceState';
import startDictate from './actions/startDictate';
import startSpeakingActivity from './actions/startSpeakingActivity';
+import startVoiceRecording from './actions/startVoiceRecording';
import stopDictate from './actions/stopDictate';
import stopSpeakingActivity from './actions/stopSpeakingActivity';
+import stopVoiceRecording from './actions/stopVoiceRecording';
import submitSendBox from './actions/submitSendBox';
+import unmuteVoiceRecording from './actions/unmuteVoiceRecording';
+import unregisterVoiceHandler from './actions/unregisterVoiceHandler';
import * as ActivityClientState from './constants/ActivityClientState';
import * as DictateState from './constants/DictateState';
import createStore, {
@@ -44,12 +52,6 @@ import isVoiceActivity from './utils/voiceActivity/isVoiceActivity';
import isVoiceTranscriptActivity from './utils/voiceActivity/isVoiceTranscriptActivity';
import getVoiceActivityRole from './utils/voiceActivity/getVoiceActivityRole';
import getVoiceActivityText from './utils/voiceActivity/getVoiceActivityText';
-import startVoiceRecording from './actions/startVoiceRecording';
-import stopVoiceRecording from './actions/stopVoiceRecording';
-import setVoiceState from './actions/setVoiceState';
-import registerVoiceHandler from './actions/registerVoiceHandler';
-import unregisterVoiceHandler from './actions/unregisterVoiceHandler';
-import postVoiceActivity from './actions/postVoiceActivity';
import type { VoiceState } from './actions/setVoiceState';
import type { VoiceHandler } from './actions/registerVoiceHandler';
@@ -116,6 +118,7 @@ export {
isVoiceActivity,
isVoiceTranscriptActivity,
markActivity,
+ muteVoiceRecording,
onErrorResumeNext,
parseAction,
parseClaim,
@@ -127,7 +130,6 @@ export {
postActivity,
postVoiceActivity,
registerVoiceHandler,
- unregisterVoiceHandler,
sendEvent,
sendFiles,
sendMessage,
@@ -149,7 +151,9 @@ export {
stopDictate,
stopSpeakingActivity,
stopVoiceRecording,
- submitSendBox
+ submitSendBox,
+ unmuteVoiceRecording,
+ unregisterVoiceHandler
};
export type {
diff --git a/packages/core/src/reducers/voiceActivity.ts b/packages/core/src/reducers/voiceActivity.ts
index d7f6953e49..727394ea77 100644
--- a/packages/core/src/reducers/voiceActivity.ts
+++ b/packages/core/src/reducers/voiceActivity.ts
@@ -1,20 +1,26 @@
+import { VOICE_MUTE_RECORDING } from '../actions/muteVoiceRecording';
import { VOICE_REGISTER_HANDLER } from '../actions/registerVoiceHandler';
import { VOICE_SET_STATE } from '../actions/setVoiceState';
import { VOICE_START_RECORDING } from '../actions/startVoiceRecording';
import { VOICE_STOP_RECORDING } from '../actions/stopVoiceRecording';
+import { VOICE_UNMUTE_RECORDING } from '../actions/unmuteVoiceRecording';
import { VOICE_UNREGISTER_HANDLER } from '../actions/unregisterVoiceHandler';
+import type { VoiceMuteRecordingAction } from '../actions/muteVoiceRecording';
import type { VoiceHandler, VoiceRegisterHandlerAction } from '../actions/registerVoiceHandler';
import type { VoiceSetStateAction, VoiceState } from '../actions/setVoiceState';
import type { VoiceStartRecordingAction } from '../actions/startVoiceRecording';
import type { VoiceStopRecordingAction } from '../actions/stopVoiceRecording';
+import type { VoiceUnmuteRecordingAction } from '../actions/unmuteVoiceRecording';
import type { VoiceUnregisterHandlerAction } from '../actions/unregisterVoiceHandler';
type VoiceActivityActions =
+ | VoiceMuteRecordingAction
| VoiceRegisterHandlerAction
| VoiceSetStateAction
| VoiceStartRecordingAction
| VoiceStopRecordingAction
+ | VoiceUnmuteRecordingAction
| VoiceUnregisterHandlerAction;
interface VoiceActivityState {
@@ -32,6 +38,18 @@ export default function voiceActivity(
action: VoiceActivityActions
): VoiceActivityState {
switch (action.type) {
+ case VOICE_MUTE_RECORDING:
+ // Only allow muting when in listening state
+ if (state.voiceState !== 'listening') {
+ console.warn(`botframework-webchat: Cannot mute from "${state.voiceState}" state, must be "listening"`);
+ return state;
+ }
+
+ return {
+ ...state,
+ voiceState: 'muted'
+ };
+
case VOICE_REGISTER_HANDLER: {
const newHandlers = new Map(state.voiceHandlers);
newHandlers.set(action.payload.id, action.payload.voiceHandler);
@@ -72,6 +90,16 @@ export default function voiceActivity(
voiceState: 'idle'
};
+ case VOICE_UNMUTE_RECORDING:
+ if (state.voiceState !== 'muted') {
+ console.warn(`botframework-webchat: Should not transit from "${state.voiceState}" to "listening"`);
+ }
+
+ return {
+ ...state,
+ voiceState: 'listening'
+ };
+
default:
return state;
}