@@ -49,7 +49,7 @@ expo-speech-recognition implements the iOS [`SFSpeechRecognizer`](https://develo
1. Install the package

```
- npm install @jamsch/expo-speech-recognition
+ npm install expo-speech-recognition
```

2. Configure the config plugin.
@@ -62,7 +62,7 @@ npm install @jamsch/expo-speech-recognition
  "expo": {
    "plugins": [
      [
-       "@jamsch/expo-speech-recognition",
+       "expo-speech-recognition",
        {
          "microphonePermission": "Allow $(PRODUCT_NAME) to use the microphone.",
          "speechRecognitionPermission": "Allow $(PRODUCT_NAME) to use speech recognition.",
@@ -88,7 +88,7 @@ Using hooks is the easiest way to get started. The `useSpeechRecognitionEvent` h
import {
  ExpoSpeechRecognitionModule,
  useSpeechRecognitionEvent,
- } from "@jamsch/expo-speech-recognition";
+ } from "expo-speech-recognition";

function App() {
  const [recognizing, setRecognizing] = useState(false);
@@ -143,7 +143,7 @@ function App() {
You should request permissions prior to starting recognition. This library exports two functions: `getPermissionsAsync` and `requestPermissionsAsync` for this purpose. If you do not request permissions or the user has denied permissions after starting, expect an `error` event with the `error` code set to `not-allowed`.

```ts
- import { ExpoSpeechRecognitionModule } from "@jamsch/expo-speech-recognition";
+ import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";

ExpoSpeechRecognitionModule.getPermissionsAsync().then((result) => {
  console.log("Status:", result.status);
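  // Sketch of a possible follow-up (not part of the original example):
  // prompt the user if permission isn't granted yet. If they deny, a later
  // start() emits an "error" event with the error code set to "not-allowed".
  if (result.status !== "granted") {
    ExpoSpeechRecognitionModule.requestPermissionsAsync();
  }
});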
@@ -170,7 +170,7 @@ You can also use the `ExpoSpeechRecognitionModule` to use the native APIs direct
import {
  ExpoSpeechRecognitionModule,
  addSpeechRecognitionListener,
- } from "@jamsch/expo-speech-recognition";
+ } from "expo-speech-recognition";

// Register event listeners
const startListener = addSpeechRecognitionListener("start", () => {
@@ -294,7 +294,7 @@ import {
  type ExpoSpeechRecognitionErrorCode,
  addSpeechRecognitionListener,
  useSpeechRecognitionEvent,
- } from "@jamsch/expo-speech-recognition";
+ } from "expo-speech-recognition";

addSpeechRecognitionListener("error", (event) => {
  console.log("error code:", event.error, "error message:", event.message);
@@ -344,7 +344,7 @@ import { Button, View } from "react-native";
import {
  ExpoSpeechRecognitionModule,
  useSpeechRecognitionEvent,
- } from "@jamsch/expo-speech-recognition";
+ } from "expo-speech-recognition";

function RecordAudio() {
  const [recording, setRecording] = useState(false);
@@ -411,7 +411,7 @@ function AudioPlayer(props: { source: string }) {

## Transcribing audio files

- You can use the `audioSource.sourceUri` option to transcribe audio files instead of using the microphone.
+ You can use the `audioSource.uri` option to transcribe audio files instead of using the microphone.

> **Important note**: This feature is available on Android 13+ and iOS. If the device does not support the feature, you'll receive an `error` event with the code `audio-capture`.

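A minimal sketch of the renamed option (the file URI is hypothetical, the Android-only format fields assume a 16 kHz mono PCM WAV, and `AudioEncodingAndroid.ENCODING_PCM_16BIT` is assumed to mirror Android's `AudioFormat` encoding constants; the full `TranscribeAudioFile` example follows below):

```ts
import {
  ExpoSpeechRecognitionModule,
  AudioEncodingAndroid,
} from "expo-speech-recognition";

ExpoSpeechRecognitionModule.start({
  lang: "en-US",
  audioSource: {
    // Hypothetical local file URI
    uri: "file:///path/to/audio.wav",
    // Android-only hints describing the file's format (assumed to mirror
    // android.media.AudioFormat's ENCODING_PCM_16BIT)
    audioEncoding: AudioEncodingAndroid.ENCODING_PCM_16BIT,
    sampleRate: 16000,
    audioChannels: 1,
  },
});
```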
@@ -443,7 +443,7 @@ import {
  ExpoSpeechRecognitionModule,
  useSpeechRecognitionEvent,
  AudioEncodingAndroid,
- } from "@jamsch/expo-speech-recognition";
+ } from "expo-speech-recognition";

function TranscribeAudioFile() {
  const [transcription, setTranscription] = useState("");
@@ -500,7 +500,7 @@ Refer to the [SpeechRecognition MDN docs](https://developer.mozilla.org/en-US/do
// "npm install -D @types/dom-speech-recognition"
import "dom-speech-recognition";

- import { ExpoWebSpeechRecognition } from "@jamsch/expo-speech-recognition";
+ import { ExpoWebSpeechRecognition } from "expo-speech-recognition";

// Polyfill the globals for use in external libraries
webkitSpeechRecognition = ExpoWebSpeechRecognition;
@@ -522,7 +522,7 @@ recognition.contextualStrings = ["Carlsen", "Nepomniachtchi", "Praggnanandhaa"];
recognition.requiresOnDeviceRecognition = true;
recognition.addsPunctuation = true;
recognition.androidIntentOptions = {
-   EXTRA_LANGUAGE_MODEL: "quick_response",
+   EXTRA_LANGUAGE_MODEL: "web_search",
};
recognition.androidRecognitionServicePackage = "com.google.android.tts";

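// Note: "web_search" corresponds to Android's
// RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH, which biases recognition
// toward search-style queries.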
@@ -571,7 +571,7 @@ recognition.abort();
On Android, you may notice a beep sound when you start and stop speech recognition. This is due to hardcoded behavior in the underlying SpeechRecognizer API. As a workaround, you can enable continuous recognition:

```ts
- import { ExpoSpeechRecognitionModule } from "@jamsch/expo-speech-recognition";
+ import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";

ExpoSpeechRecognitionModule.start({
  lang: "en-US",
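  // (sketch) continuous mode keeps the session running between utterances,
  // so the hardcoded start/stop sounds fire once rather than per phrase
  continuous: true,
});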
@@ -616,7 +616,7 @@ As of 7 Aug 2024, the following platforms are supported:
Starts speech recognition.

```ts
- import { ExpoSpeechRecognitionModule } from "@jamsch/expo-speech-recognition";
+ import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";

ExpoSpeechRecognitionModule.start({
  lang: "en-US",
@@ -628,7 +628,7 @@ ExpoSpeechRecognitionModule.start({
Stops speech recognition and attempts to return a final result (through the `result` event).

```ts
- import { ExpoSpeechRecognitionModule } from "@jamsch/expo-speech-recognition";
+ import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";

ExpoSpeechRecognitionModule.stop();
// Expect the following events to be emitted in order:
@@ -645,7 +645,7 @@ ExpoSpeechRecognitionModule.stop();
Immediately cancels speech recognition (does not process the final result).

```ts
- import { ExpoSpeechRecognitionModule } from "@jamsch/expo-speech-recognition";
+ import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";

ExpoSpeechRecognitionModule.abort();
// Expect an "error" event to be emitted with the code "aborted"
@@ -659,7 +659,7 @@ For iOS, once a user has granted (or denied) microphone and speech recognition permissions by responding
the only way that the permissions can be changed is by the user themselves using the device settings app.

```ts
- import { requestPermissionsAsync } from "@jamsch/expo-speech-recognition";
+ import { requestPermissionsAsync } from "expo-speech-recognition";

requestPermissionsAsync().then((result) => {
  console.log("Status:", result.status); // "granted" | "denied" | "not-determined"
@@ -674,7 +674,7 @@ requestPermissionsAsync().then((result) => {
Returns the current permission status for the microphone and speech recognition.

```ts
- import { getPermissionsAsync } from "@jamsch/expo-speech-recognition";
+ import { getPermissionsAsync } from "expo-speech-recognition";

getPermissionsAsync().then((result) => {
  console.log("Status:", result.status); // "granted" | "denied" | "not-determined"
@@ -689,7 +689,7 @@ getPermissionsAsync().then((result) => {
Returns the current internal state of the speech recognizer.

```ts
- import { getStateAsync } from "@jamsch/expo-speech-recognition";
+ import { getStateAsync } from "expo-speech-recognition";

// Note: you should probably rely on the events emitted by the SpeechRecognition API instead
getStateAsync().then((state) => {
@@ -701,7 +701,7 @@ getStateAsync().then((state) => {
### `addSpeechRecognitionListener(eventName: string, listener: (event: any) => void): { remove: () => void }`

```ts
- import { addSpeechRecognitionListener } from "@jamsch/expo-speech-recognition";
+ import { addSpeechRecognitionListener } from "expo-speech-recognition";

const listener = addSpeechRecognitionListener("result", (event) => {
  console.log("result:", event.results[event.resultIndex][0].transcript);
@@ -716,7 +716,7 @@ listener.remove();
Get the list of supported locales and the installed locales that can be used for on-device speech recognition.

```ts
- import { getSupportedLocales } from "@jamsch/expo-speech-recognition";
+ import { getSupportedLocales } from "expo-speech-recognition";

getSupportedLocales({
  /**
@@ -751,7 +751,7 @@ Get the list of speech recognition services available on the device.
> Note: this only includes services that are listed under `androidSpeechServicePackages` in your app.json, as well as the core services listed under `forceQueryable` when running the command: `adb shell dumpsys package queries`

```ts
- import { getSpeechRecognitionServices } from "@jamsch/expo-speech-recognition";
+ import { getSpeechRecognitionServices } from "expo-speech-recognition";

const packages = getSpeechRecognitionServices();
console.log("Speech recognition services:", packages.join(", "));
@@ -763,7 +763,7 @@ console.log("Speech recognition services:", packages.join(", "));
Returns the default voice recognition service on the device.

```ts
- import { getDefaultRecognitionService } from "@jamsch/expo-speech-recognition";
+ import { getDefaultRecognitionService } from "expo-speech-recognition";

const service = getDefaultRecognitionService();
console.log("Default recognition service:", service.packageName);
@@ -775,7 +775,7 @@ console.log("Default recognition service:", service.packageName);
Returns the default voice assistant service on the device.

```ts
- import { getAssistantService } from "@jamsch/expo-speech-recognition";
+ import { getAssistantService } from "expo-speech-recognition";

const service = getAssistantService();
console.log("Default assistant service:", service.packageName);
@@ -788,7 +788,7 @@ console.log("Default assistant service:", service.packageName);
Whether on-device speech recognition is available on the device.

```ts
- import { supportsOnDeviceRecognition } from "@jamsch/expo-speech-recognition";
+ import { supportsOnDeviceRecognition } from "expo-speech-recognition";

const available = supportsOnDeviceRecognition();
console.log("OnDevice recognition available:", available);
@@ -799,7 +799,7 @@ console.log("OnDevice recognition available:", available);
Whether audio recording is supported during speech recognition. This mostly applies to Android, where it checks that the device is running at least Android 13.

```ts
- import { supportsRecording } from "@jamsch/expo-speech-recognition";
+ import { supportsRecording } from "expo-speech-recognition";

const available = supportsRecording();
console.log("Recording available:", available);
@@ -814,7 +814,7 @@ You can see which locales are supported and installed on your device by running
To download the offline model for a specific locale, use the `androidTriggerOfflineModelDownload` function.

```ts
- import { ExpoSpeechRecognitionModule } from "@jamsch/expo-speech-recognition";
+ import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";

// Download the offline model for the specified locale
ExpoSpeechRecognitionModule.androidTriggerOfflineModelDownload({
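  // (illustrative) a BCP-47 locale tag; pick one reported as supported by
  // getSupportedLocales()
  locale: "en-US",
});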
@@ -856,7 +856,7 @@ import {
  AVAudioSessionCategory,
  AVAudioSessionCategoryOptions,
  AVAudioSessionMode,
- } from "@jamsch/expo-speech-recognition";
+ } from "expo-speech-recognition";

setCategoryIOS({
  category: AVAudioSessionCategory.playAndRecord, // or "playAndRecord"
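  // (sketch completing the truncated call; values are illustrative and use
  // the enums imported above)
  categoryOptions: [AVAudioSessionCategoryOptions.defaultToSpeaker], // or ["defaultToSpeaker"]
  mode: AVAudioSessionMode.measurement, // or "measurement"
});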
@@ -873,7 +873,7 @@ setCategoryIOS({
Returns the current audio session category and options. For advanced use cases, you may want to use this function to safely configure the audio session category and mode.

```ts
- import { getAudioSessionCategoryAndOptionsIOS } from "@jamsch/expo-speech-recognition";
+ import { getAudioSessionCategoryAndOptionsIOS } from "expo-speech-recognition";

const values = getAudioSessionCategoryAndOptionsIOS();
console.log(values);
@@ -885,7 +885,7 @@ console.log(values);
Sets the audio session active state.

```ts
- import { setAudioSessionActiveIOS } from "@jamsch/expo-speech-recognition";
+ import { setAudioSessionActiveIOS } from "expo-speech-recognition";

setAudioSessionActiveIOS(true, {
  notifyOthersOnDeactivation: true,