forked from wesbos/JavaScript30
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex-START.html
More file actions
158 lines (130 loc) · 5.11 KB
/
index-START.html
File metadata and controls
158 lines (130 loc) · 5.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Speech Detection</title>
<!-- Emoji favicon served as an image by the fav.farm service -->
<link rel="icon" href="https://fav.farm/🔥" />
</head>
<body>
<!-- Transcribed speech is appended here as <p> elements by the script below;
     contenteditable lets the user manually correct the transcript. -->
<div class="words" contenteditable>
</div>
<!-- Restarts recognition after the user has said "stop"; disabled while listening -->
<button class="start-button">Start</button>
<script>
// Speech-to-text demo using the Web Speech API.
// Grab the SpeechRecognition constructor, falling back to the webkit-prefixed
// version for browsers (e.g. Chrome) that only ship the prefixed API.
window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
if (!window.SpeechRecognition) {
  // No support at all (e.g. Firefox without the flag): tell the user and bail out.
  document.querySelector('.words').textContent = 'Speech recognition is not supported in this browser.';
  document.querySelector('.start-button').disabled = true;
  throw new Error('SpeechRecognition not supported');
}

const recognition = new SpeechRecognition();
// Emit interim (in-progress) results while the user is still speaking,
// instead of only a single final result per utterance.
recognition.interimResults = true;

// True once the user has said "stop" (or the mic was denied); gates the
// auto-restart in the 'end' handler below.
let stopped = false;

// Each utterance gets its own <p>; this is the paragraph currently being filled.
let p = document.createElement('p');
const words = document.querySelector('.words');
words.appendChild(p);

// 'result' fires with the accumulated results for the current session.
// Flatten every result's best alternative into one transcript string.
recognition.addEventListener('result', event => {
  const transcript = Array.from(event.results)
    .map(result => result[0])
    .map(result => result.transcript)
    .join('');
  // Live-update the current paragraph with the (possibly interim) transcript.
  p.textContent = transcript;
  // Once the engine finalizes this utterance, start a fresh paragraph for the next one.
  if (event.results[event.resultIndex].isFinal) {
    p = document.createElement('p');
    words.appendChild(p);
  }
  // Voice command: saying "stop" (as a whole word, any case) halts recognition.
  if (/\bstop\b/i.test(transcript)) {
    stopped = true;
    recognition.stop();
  }
});

const startButton = document.querySelector('.start-button');
// Clicking Start resumes listening after a voice "stop"; disable the button
// while recognition is running so it can't be double-started (start() on a
// running recognizer throws).
startButton.addEventListener('click', () => {
  stopped = false;
  recognition.start();
  startButton.disabled = true;
});
// Also disable the button whenever recognition starts for any other reason
// (e.g. the initial auto-start on page load).
recognition.addEventListener('start', () => {
  startButton.disabled = true;
});

// Single 'end' handler (the original registered two): recognition sessions end
// on their own after a pause, so restart to keep listening continuously —
// unless the user asked to stop, in which case re-enable the Start button.
recognition.addEventListener('end', () => {
  if (stopped) {
    startButton.disabled = false;
  } else {
    recognition.start();
  }
});

// 'not-allowed' means the user denied microphone access.
recognition.addEventListener('error', e => {
  if (e.error === 'not-allowed') {
    // BUG FIX: mark stopped so the 'end' handler above does not immediately
    // call start() again — without this, a denied mic caused an infinite
    // error/restart loop.
    stopped = true;
    words.textContent = 'Microphone access was denied. Please allow microphone access and refresh the page.';
    startButton.disabled = true;
  }
});

// Kick off recognition once on page load (browser will prompt for the mic).
recognition.start();
</script>
<style>
/* 10px root so 1rem = 10px; makes the rem-based ruled-line math below easy */
html {
  font-size: 10px;
}
body {
  background: #ffc600;
  font-family: 'helvetica neue';
  font-weight: 200;
  font-size: 20px;
}
/* The transcript card, styled like a sheet of ruled notebook paper */
.words {
  max-width: 500px;
  margin: 50px auto;
  background: white;
  border-radius: 5px;
  box-shadow: 10px 10px 0 rgba(0,0,0,0.1);
  /* large left padding leaves room for the red margin line (see :before) */
  padding: 1rem 2rem 1rem 5rem;
  /* repeating horizontal "ruled lines": a thin blue stripe every 3rem
     (old-style -webkit-gradient syntax, kept as-is) */
  background: -webkit-gradient(linear, 0 0, 0 100%, from(#d9eaf3), color-stop(4%, #fff)) 0 4px;
  background-size: 100% 3rem;
  position: relative;
  /* line-height matches the 3rem stripe so text sits on the ruled lines */
  line-height: 3rem;
}
p {
  margin: 0 0 3rem;
}
/* Vertical "margin" line down the left edge of the paper */
.words:before {
  content: '';
  position: absolute;
  width: 4px;
  top: 0;
  left: 30px;
  bottom: 0;
  border: 1px solid;
  /* transparent top/bottom, pinkish left/right edges */
  border-color: transparent #efe4e4;
}
</style>
</body>
</html>