Skip to content

Commit 6360a10

Browse files
committed
feat: new package use-speech-recognition
1 parent 7488bd7 commit 6360a10

File tree

7 files changed

+238
-0
lines changed

7 files changed

+238
-0
lines changed

app/package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
"@n3p6/react-three-vrm": "workspace:",
1616
"@n3p6/react-three-yuka": "workspace:",
1717
"@n3p6/use-illuminance": "workspace:",
18+
"@n3p6/use-speech-recognition": "workspace:",
1819
"@pixiv/three-vrm": "catalog:three",
1920
"@pixiv/three-vrm-animation": "catalog:three",
2021
"@pmndrs/xr": "catalog:three",
Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
import { useSpeechRecognition } from '@n3p6/use-speech-recognition'
import { Container, Root, Text } from '@react-three/uikit'
import { Button } from '@react-three/uikit-default'
import { useEffect } from 'react'

/**
 * Debug panel for the `@n3p6/use-speech-recognition` hook.
 *
 * Renders the hook's state flags and latest transcript inside a uikit panel,
 * plus Start/Stop buttons wired to the hook's controls.
 */
const DebugSpeechRecognition = () => {
  const { isFinal, isListening, isSupported, result, start, stop } = useSpeechRecognition({
    // interimResults: false,
    lang: navigator.language,
  })

  // Surface each new transcript in the console while debugging.
  useEffect(() => console.warn(result), [result])

  return (
    <group position={[0, 1, 0]}>
      <Root>
        <Container flexDirection="column" gap={4}>
          {/*
            Booleans render as nothing in JSX children, so the original
            `{isSupported}` etc. displayed empty labels — stringify so the
            actual values are visible. `result` starts as undefined; show ''.
          */}
          <Text>{`isSupported: ${String(isSupported)}`}</Text>
          <Text>{`isListening: ${String(isListening)}`}</Text>
          <Text>{`isFinal: ${String(isFinal)}`}</Text>
          <Text>{`result: ${result ?? ''}`}</Text>
          <Button
            data-test-id="speech-recognition-start"
            onClick={start}
          >
            <Text>Start</Text>
          </Button>
          <Button
            data-test-id="speech-recognition-stop"
            onClick={stop}
            variant="destructive"
          >
            <Text>Stop</Text>
          </Button>
        </Container>
      </Root>
    </group>
  )
}

export default DebugSpeechRecognition

app/src/router.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ export type Path =
1111
| `/debug/menu`
1212
| `/debug/planes`
1313
| `/debug/settings`
14+
| `/debug/speech-recognition`
1415
| `/debug/tablet`
1516
| `/debug/text`
1617
| `/settings`
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
# @n3p6/use-speech-recognition
2+
3+
A very small React Hook to help you use [SpeechRecognition](https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition) from [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API).
4+
5+
## Usage
6+
7+
### Install

```sh
npm install @n3p6/use-speech-recognition
```
8+
9+
### Example
10+
11+
```tsx
12+
import { useSpeechRecognition } from '@n3p6/use-speech-recognition'
13+
14+
const App = () => {
15+
const { isFinal, isListening, isSupported, result, start, stop } = useSpeechRecognition()
16+
17+
return (
18+
<div>
19+
<p>
20+
isSupported:
21+
{String(isSupported)}
22+
</p>
23+
<p>
24+
isListening:
25+
{String(isListening)}
26+
</p>
27+
<p>
28+
isFinal:
29+
{String(isFinal)}
30+
</p>
31+
<p>
32+
result:
33+
{result}
34+
</p>
35+
<button data-test-id="start" onClick={start} type="submit">Start</button>
36+
<button data-test-id="stop" onClick={stop} type="submit">Stop</button>
37+
</div>
38+
)
39+
}
40+
41+
export default App
42+
```
43+
44+
## License
45+
46+
[MIT](../../LICENSE.md)
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
{
2+
"name": "@n3p6/use-speech-recognition",
3+
"type": "module",
4+
"version": "0.1.0-beta.1",
5+
"author": "Moeru AI",
6+
"license": "MIT",
7+
"homepage": "https://github.com/moeru-ai/n3p6",
8+
"repository": {
9+
"type": "git",
10+
"url": "git+https://github.com/moeru-ai/n3p6.git",
11+
"directory": "packages/use-speech-recognition"
12+
},
13+
"bugs": "https://github.com/moeru-ai/n3p6/issues",
14+
"sideEffects": false,
15+
"exports": "./src/index.ts",
16+
"publishConfig": {
17+
"exports": {
18+
".": {
19+
"types": "./dist/index.d.ts",
20+
"default": "./dist/index.js"
21+
},
22+
"./package.json": "./package.json"
23+
},
24+
"main": "./dist/index.js",
25+
"types": "./dist/index.d.ts"
26+
},
27+
"files": [
28+
"dist"
29+
],
30+
"scripts": {
31+
"build": "pkgroll"
32+
},
33+
"peerDependencies": {
34+
"@types/react": "catalog:react",
35+
"react": "catalog:react"
36+
},
37+
"dependencies": {
38+
"@types/dom-speech-recognition": "^0.0.6"
39+
}
40+
}
Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
/// <reference types="dom-speech-recognition" />
2+
3+
import { useCallback, useMemo, useState } from 'react'
4+
5+
export interface UseSpeechRecognitionOptions {
6+
/** @default `true` */
7+
continuous?: SpeechRecognition['continuous']
8+
/** @default `true` */
9+
interimResults?: SpeechRecognition['interimResults']
10+
/** @default `en-US` */
11+
lang?: SpeechRecognition['lang']
12+
/** @default `1` */
13+
maxAlternatives?: SpeechRecognition['maxAlternatives']
14+
}
15+
16+
export const useSpeechRecognition = (options: UseSpeechRecognitionOptions = {}) => {
17+
const isSupported = useMemo(() => 'SpeechRecognition' in window || 'webkitSpeechRecognition' in window, [])
18+
const [isFinal, setIsFinal] = useState(false)
19+
const [isListening, setIsListening] = useState(false)
20+
const [result, setResult] = useState<string>()
21+
const [error, setError] = useState<SpeechRecognitionErrorEvent>()
22+
23+
const speechRecognition = useMemo(() => {
24+
if (!isSupported)
25+
return
26+
27+
// eslint-disable-next-line ts/strict-boolean-expressions
28+
const speechRecognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)()
29+
30+
speechRecognition.continuous = options.continuous ?? true
31+
speechRecognition.interimResults = options.interimResults ?? true
32+
speechRecognition.lang = options.lang ?? 'en-US'
33+
speechRecognition.maxAlternatives = options.maxAlternatives ?? 1
34+
35+
speechRecognition.onstart = () => {
36+
setIsListening(true)
37+
setIsFinal(false)
38+
}
39+
40+
speechRecognition.onresult = (event) => {
41+
const currentResult = event.results[event.resultIndex]
42+
const { transcript } = currentResult[0]
43+
44+
setIsFinal(currentResult.isFinal)
45+
setResult(transcript)
46+
setError(undefined)
47+
}
48+
49+
speechRecognition.onerror = error =>
50+
setError(error)
51+
52+
speechRecognition.onend = () =>
53+
setIsListening(false)
54+
55+
return speechRecognition
56+
}, [options, isSupported])
57+
58+
const start = useCallback(() => speechRecognition?.start(), [speechRecognition])
59+
const stop = useCallback(() => speechRecognition?.stop(), [speechRecognition])
60+
const toggle = useCallback(() => {
61+
if (isListening)
62+
speechRecognition?.stop()
63+
else
64+
speechRecognition?.start()
65+
}, [speechRecognition, isListening])
66+
67+
return {
68+
error,
69+
isFinal,
70+
isListening,
71+
isSupported,
72+
result,
73+
start,
74+
stop,
75+
toggle,
76+
}
77+
}

pnpm-lock.yaml

Lines changed: 20 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)