Prerequisites
At present, Taro offers only one development workflow: install the Taro command-line tool (Taro CLI).
Taro CLI depends on Node.js, so a Node.js environment must be installed on your machine. There are many ways to install it; if you are completely unfamiliar with Node.js, you can download an installer from the official Node.js site. We recommend installing the LTS release of Node.js (currently v12).
Once Node.js is available on your machine, install Taro CLI by running npm i -g @tarojs/cli in a terminal. When the installation finishes, run taro; if version and usage information is printed, the installation succeeded.
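In a terminal, that is:

npm i -g @tarojs/cli   # install the Taro CLI globally (may need sudo on macOS/Linux)
taro                   # prints version and usage information once installed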
For an editor we recommend VSCode or WebStorm (or any other JetBrains IDE that supports web development).
If you use VSCode, install the ESLint extension; and if you use TypeScript, don't forget to configure its eslint.probe setting.
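A minimal .vscode/settings.json sketch (the extension's default probe list varies by version; the two TypeScript entries are the point here):

{
  "eslint.probe": [
    "javascript",
    "javascriptreact",
    "typescript",
    "typescriptreact"
  ]
}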
If you are willing to pay and would rather not fiddle with configuration, choose WebStorm (or another JetBrains IDE that supports web development); it needs essentially no setup.
Whether you use VSCode or WebStorm, with the plugins above installed, Taro development gets autocompletion and real-time linting.
The demo here uses the React flavor of Taro; see 安装及使用 | Taro 文档 (the installation and usage page of the Taro docs).
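To scaffold a React project, the CLI's init command can be used (taro init prompts interactively for the framework, where you pick React, and optionally TypeScript; the project name myApp is arbitrary):

taro init myApp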
1. Add the plugin in the mini-program admin console: 微信同声传译 (WeChat simultaneous interpretation)
Log in to the mini-program admin console: https://mp.weixin.qq.com
设置 -> 第三方设置 -> 添加插件 (Settings -> Third-party settings -> Add plugin)
Type “微信同声传译”, click search, then select the plugin and click Add.
Copy its AppID and the latest version number.

src/app.config.js
export default defineAppConfig({
  plugins: {
    WechatSI: {
      version: '0.3.4',
      provider: 'wx069ba97219f66d99',
    },
  },
  // ...
})
Full page code
import { requirePlugin, useReady, showModal } from '@tarojs/taro';
import { View, Text, Button } from '@tarojs/components'
import { useState } from 'react';
import 'taro-ui/dist/style/components/button.scss' // import on demand
import './index.scss'

const Translation = () => {
  // Load the plugin: WeChat simultaneous interpretation (WechatSI)
  const plugin = requirePlugin('WechatSI');
  // Get the globally unique speech recognition manager
  const manager = plugin.getRecordRecognitionManager();
  const [recordState, setRecordState] = useState(false);
  const [content, setContent] = useState('');

  const initRecord = () => {
    // Fired with intermediate recognition results
    manager.onRecognize = function (res) {
      console.log(res);
    };
    // Fired when recording and recognition start normally
    manager.onStart = function (res) {
      console.log('recognition started', res);
    };
    // Fired on recognition errors
    manager.onError = function (res) {
      console.error('error msg', res);
    };
    // Fired when recognition stops
    manager.onStop = function (res) {
      console.log('.............. recording stopped');
      console.log(`temp audio file path --> ${res.tempFilePath}`);
      console.log(`total duration --> ${res.duration}ms`);
      console.log(`file size --> ${res.fileSize}B`);
      console.log(`recognized text --> ${res.result}`);
      if (res.result === '') {
        showModal({
          title: '提示',
          content: '听不清楚,请重新说一遍!',
          showCancel: false,
        });
        return;
      }
      // Use the functional updater: this handler is registered once in
      // useReady, so reading `content` directly would see a stale closure
      setContent(prev => prev + res.result);
      showModal({
        title: '提示',
        content: res.result,
        showCancel: false,
      });
    };
  };

  useReady(() => {
    initRecord();
  });

  const touchStart = (): void => {
    setRecordState(true);
    // Start speech recognition
    manager.start({
      lang: 'zh_CN', // recognition language; zh_CN, en_US, zh_HK and sichuanhua are supported
    });
  };

  const touchEnd = (): void => {
    setRecordState(false);
    // Stop speech recognition
    manager.stop();
  };

  return (
    <View className='index'>
      <View>
        <Button
          onLongPress={touchStart}
          onTouchEnd={touchEnd}
          aria-role='button' aria-label='关键词内容播报'
          className='recognition-play-key'
        >{recordState ? '……' : '按住说话'}</Button>
      </View>
      <View>
        <Text>{content}</Text>
      </View>
    </View>
  )
}

export default Translation
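To try the page out, its route also has to be registered in app.config — a minimal sketch, assuming the file above is saved as src/pages/translation/index.tsx (the path is an assumption):

export default defineAppConfig({
  // plugins: { WechatSI: ... } as configured in the previous step
  pages: [
    'pages/translation/index', // hypothetical route for the page above
  ],
})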
WeChat mini-program Taro speech-to-text TSX code, wrapped as a reusable component
import Taro, { VideoContext } from '@tarojs/taro'
import {
Button,
Field,
Popup,
Radio,
RadioGroup,
Image,
Icon,
} from '@antmjs/vantui'
import { View, Video, Text } from '@tarojs/components'
import { useEffect, useRef, useState } from 'react'
import classnames from 'classnames'
import dayjs from 'dayjs'
import rimg from '@/assets/img/sound-record.png'
import obsUploadFile from '@/utils/obsUpload'
import AssistantApi from '@/services/assistant'
import styles from './index.module.scss'
const mikeImg1 = require('../../../../assets/img/mike/voice_icon_speech_sound_1.png')
const mikeImg2 = require('../../../../assets/img/mike/voice_icon_speech_sound_2.png')
const mikeImg3 = require('../../../../assets/img/mike/voice_icon_speech_sound_3.png')
const mikeImg4 = require('../../../../assets/img/mike/voice_icon_speech_sound_4.png')
const mikeImg5 = require('../../../../assets/img/mike/voice_icon_speech_sound_5.png')
export default props => {
  const { module, show, closeAction } = props
  const videoRef = useRef()
  const [part, setPart] = useState('Use')
  const [type, setType] = useState('Proposal')
  const [moreShow, setMoreShow] = useState(false)
  const [saving, setSaving] = useState(false)
  const [videoUrl, setVideoUrl] = useState('')
  const [videos, setVideos] = useState<Array<TaroGeneral.IAnyObject>>([])
  const [inputValue, setInputValue] = useState('')
  const [pictures, setPictures] = useState<Array<string>>([])
  // Whether the voice-input area (recording image) is shown
  const [imgFlag, setImgFlag] = useState(false)
  // Microphone animation frames
  const mikeImgs = [mikeImg1, mikeImg2, mikeImg3, mikeImg4, mikeImg5]
  const [mikeIndex, setMikeIndex] = useState(0)
  // Microphone animation timer id (0 means not recording)
  const [mikeTime, setMikeTime] = useState(0)
  const onPartRadioChange = ({ detail }) => {
    setPart(detail)
  }
  const handleTabGroupItemClick = val => {
    setType(val)
  }
  const handleInputChange = ({ detail }) => {
    setInputValue(detail)
  }
  const handleClose = () => {
    closeAction()
  }
  const handleUploadPicture = async () => {
    if (pictures.length >= 3) {
      Taro.showToast({ title: '最多上传3张图片', icon: 'none' })
      return
    }
    const { tempFilePaths } = await Taro.chooseImage({
      // defaults to 9
      count: 3,
      // original image, compressed image, or (by default) both
      sizeType: ['original', 'compressed'],
      sourceType: ['album', 'camera'],
    })
    if (Array.isArray(tempFilePaths) && tempFilePaths.length > 0) {
      // Upload everything first, then update state once; `forEach` with an
      // async callback cannot be awaited and would read stale `pictures` state
      const results = await Promise.all(
        tempFilePaths.map(file => obsUploadFile(file)),
      )
      setPictures([...pictures, ...results.map(r => r.url)])
    }
  }
  const handleVideoPlayClick = autoPlay => {
    if (videos.length <= 0) {
      AssistantApi.getExplain({
        Module: module,
      }).then(({ data }) => {
        const { Data, ResponseStatus } = data
        if (ResponseStatus.ErrorCode !== 0 || Data.DataList.length <= 0) {
          return
        }
        setVideos(Data.DataList)
        setVideoUrl(Data.DataList[0].FileUrl)
        videoRef.current = Taro.createVideoContext('myVideo') as any
        if (autoPlay) {
          if (Data.DataList.length <= 0 || !Data.DataList[0].FileUrl) {
            Taro.showToast({
              title: '暂无可播放视频',
              icon: 'none',
            })
            return
          }
          // Wait for the <Video> element to render before going fullscreen
          setTimeout(() => {
            if (videoRef.current) {
              ;(videoRef.current as VideoContext).requestFullScreen({
                direction: 0,
              })
              ;(videoRef.current as any).play()
            }
          }, 1000)
        } else {
          setMoreShow(true)
        }
      })
      return
    }
    if (videoUrl && autoPlay && videoRef.current) {
      ;(videoRef.current as any).play()
      ;(videoRef.current as VideoContext).requestFullScreen({ direction: 0 })
    } else {
      setMoreShow(true)
    }
  }
  const clearPicture = pi => {
    Taro.showModal({
      content: '是否删除图片?',
      showCancel: true,
      success: res => {
        if (res.confirm) {
          // Copy before splicing so React state is not mutated in place
          const picArr = [...pictures]
          picArr.splice(pi, 1)
          setPictures(picArr)
        }
      },
    })
  }
  const submit = () => {
    if (saving) {
      return
    }
    if (!inputValue) {
      Taro.showToast({ title: '问题或建议不能为空', icon: 'none' })
      return
    }
    setSaving(true)
    AssistantApi.add({
      Module: module,
      FeedbackObject: part,
      FeedbackType: type,
      Context: inputValue,
      ImageList: pictures,
    }).then(
      () => {
        Taro.showToast({
          title: '保存成功',
          icon: 'success',
        })
        setInputValue('')
        setPictures([])
        closeAction()
        setSaving(false)
      },
      () => {
        setSaving(false)
      },
    )
  }
  const handleMoreClick = () => {
    handleVideoPlayClick(false)
  }
  const handleMoreVideoClose = () => {
    setMoreShow(false)
  }
  const handlePlayVideClick = url => {
    setVideoUrl(url)
    setTimeout(() => {
      if (videoUrl && videoRef.current) {
        ;(videoRef.current as any).play()
        ;(videoRef.current as VideoContext).requestFullScreen({ direction: 0 })
      }
    }, 200)
  }
  const handleSaveVideClick = url => {
    Taro.downloadFile({
      url: `${url}?time=${dayjs().valueOf()}`, // example only, not a real resource
      success(res) {
        // The success callback fires whenever the server responds with data;
        // check the status code to confirm the expected content was downloaded
        if (res.statusCode === 200) {
          Taro.saveVideoToPhotosAlbum({
            filePath: res.tempFilePath,
            success: saveRes => {
              console.log(saveRes.errMsg)
              if (saveRes.errMsg === 'saveVideoToPhotosAlbum:ok') {
                Taro.showToast({ title: '保存成功', icon: 'none' })
              } else {
                Taro.showToast({ title: saveRes.errMsg, icon: 'none' })
              }
            },
          })
        }
      },
    })
  }
  const handleFullscreenChange = e => {
    if (!e.detail.fullScreen) {
      ;(videoRef.current as any).stop()
    }
  }
  // Speech recognition manager from the WechatSI plugin
  const plugin = Taro.requirePlugin('WechatSI')
  const manager = plugin.getRecordRecognitionManager()
  manager.onStop = res => {
    console.log('result', res)
    setInputValue(res.result)
  }
  // Start recording
  const soundRecordStart = () => {
    Taro.authorize({
      scope: 'scope.record',
      success: () => {
        console.log('recording authorized')
        // Cycle the microphone animation frames every 200ms
        let i = 5
        const timer = setInterval(() => {
          i++
          setMikeIndex(i % 5)
        }, 200)
        setMikeTime(timer)
        manager.start({ duration: 30000, lang: 'zh_CN' })
      },
      fail: () => {
        Taro.showModal({
          title: '提示',
          content: '您未授权录音,功能将无法使用',
          showCancel: true,
          confirmText: '授权',
          confirmColor: '#52a2d8',
          // Open the settings page so the user can grant the permission
          success: res => {
            if (res.confirm) {
              Taro.openSetting()
            }
          },
        })
      },
    })
  }
  // Stop recording
  const soundRecordEnd = () => {
    clearInterval(mikeTime)
    setMikeTime(0)
    manager.stop()
  }
  const renderVideoItems = () => {
    return videos.map(v => {
      return (
        <View key={v.FileUrl}>
          <Image
            width="36px"
            height="36px"
            src="https://datacenter-gz-obs-dev.obs.cn-south-1.myhuaweicloud.com/assets/sky-data-center-mp/video-blue.png"
          />
          {v.Title}
          <View className={styles.operation}>
            <Image
              width="20px"
              height="20px"
              src="https://datacenter-gz-obs-dev.obs.cn-south-1.myhuaweicloud.com/assets/sky-data-center-mp/eye.png"
              onClick={() => {
                handlePlayVideClick(v.FileUrl)
              }}
            />
            <Image
              width="20px"
              height="20px"
              src="https://datacenter-gz-obs-dev.obs.cn-south-1.myhuaweicloud.com/assets/sky-data-center-mp/download.png"
              onClick={() => {
                handleSaveVideClick(v.FileUrl)
              }}
            />
          </View>
        </View>
      )
    })
  }
  const renderHeader = () => {
    return (
      <View className={styles.assistantHeader}>
        <RadioGroup
          value={part}
          onChange={onPartRadioChange}
          direction="horizontal"
        >
          <Radio value={part} name="Test" iconSize="14px">
            测试
          </Radio>
          <Radio value={part} name="Use" iconSize="14px">
            使用
          </Radio>
          <Radio value={part} name="Helper" iconSize="14px">
            助手
          </Radio>
        </RadioGroup>
        <View className={styles.tabGroup}>
          <View
            className={classnames({
              [styles.tabGroupActived]: type === 'Question',
            })}
            onClick={() => {
              handleTabGroupItemClick('Question')
            }}
          >
            问题
          </View>
          <View
            className={classnames({
              [styles.tabGroupActived]: type === 'Proposal',
            })}
            onClick={() => {
              handleTabGroupItemClick('Proposal')
            }}
          >
            建议
          </View>
        </View>
        <Button
          color="#3370FE"
          size="small"
          round
          loading={saving}
          onClick={submit}
        >
          提交
        </Button>
      </View>
    )
  }
  const renderBottom = () => {
    return (
      <View className={styles.assistantBottom}>
        {pictures.length > 0 &&
          pictures.map((pic, index) => {
            return (
              <View className={styles.pictureWrap} key={pic}>
                <Image width="48px" height="48px" src={pic} />
                <Icon
                  name="clear"
                  size="20px"
                  onClick={() => {
                    clearPicture(index)
                  }}
                />
              </View>
            )
          })}
        {pictures.length < 3 && (
          <Image
            width="48px"
            height="48px"
            src="https://datacenter-gz-obs-dev.obs.cn-south-1.myhuaweicloud.com/assets/sky-data-center-mp/photo.png"
            onClick={handleUploadPicture}
          />
        )}
        <Image
          width="48px"
          height="48px"
          src="https://datacenter-gz-obs-dev.obs.cn-south-1.myhuaweicloud.com/assets/sky-data-center-mp/video.png"
          onClick={() => {
            handleVideoPlayClick(true)
          }}
        />
        <View className={styles.assistantBottomMore} onClick={handleMoreClick}>
          <View />
          <View />
          <View />
        </View>
        {videoUrl ? (
          <Video
            id="myVideo"
            src={videoUrl}
            className={styles.assistantVideo}
            onFullscreenChange={handleFullscreenChange}
          />
        ) : null}
        <View className={styles.cancelButton} onClick={handleClose}>
          取消
        </View>
      </View>
    )
  }
  return (
    <View>
      <Popup show={show} round position="bottom" onClickOverlay={handleClose}>
        {mikeTime !== 0 && (
          <Image
            src={mikeImgs[mikeIndex]}
            width="52px"
            height="83px"
            style={{ position: 'fixed', left: '45%', zIndex: '99', top: '40%' }}
          />
        )}
        {renderHeader()}
        <View style={{ display: 'flex' }}>
          <Field
            className={styles.assistantMainInput}
            value={inputValue}
            maxlength={500}
            type="textarea"
            placeholder="请输入问题或建议"
            border={false}
            autosize
            onChange={handleInputChange}
          />
          <View style={{ display: 'flex', alignItems: 'center' }}>
            <Icon
              name="arrow-left"
              size="26px"
              onClick={() => {
                setImgFlag(!imgFlag)
              }}
            />
            {imgFlag ? (
              <View
                style={{
                  flex: 1,
                  display: 'flex',
                  alignItems: 'center',
                  flexDirection: 'column',
                }}
              >
                <Text style={{ marginBottom: '10rpx' }}>按住说话</Text>
                <Image
                  src={rimg}
                  width="50px"
                  height="50px"
                  style={{ margin: '0 30rpx' }}
                  onTouchStart={soundRecordStart}
                  onTouchEnd={soundRecordEnd}
                />
              </View>
            ) : null}
          </View>
        </View>
        {renderBottom()}
      </Popup>
      <Popup show={moreShow} round onClickOverlay={handleMoreVideoClose}>
        <View className={styles.videoList}>
          {videos.length > 0 && renderVideoItems()}
          <View onClick={handleMoreVideoClose}>关闭</View>
        </View>
      </Popup>
    </View>
  )
}
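A minimal sketch of mounting the component above (the import path and module value are assumptions; the prop names come from the destructuring at the top of the component):

import { useState } from 'react'
import { View, Button } from '@tarojs/components'
import Assistant from '@/components/assistant' // hypothetical path to the file above

export default function HomePage() {
  const [show, setShow] = useState(false)
  return (
    <View>
      <Button onClick={() => setShow(true)}>反馈</Button>
      <Assistant
        module="Home" // business module passed through to AssistantApi
        show={show}
        closeAction={() => setShow(false)}
      />
    </View>
  )
}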

Playing back the recording
// Audio playback context (the original snippet mixed the names innerAudioText
// and innerAudioContext; one name is used here throughout)
const innerAudioContext = Taro.createInnerAudioContext({
  // Whether to use WebAudio as the underlying audio driver; off by default.
  // Recommended for short, frequently played clips for better performance,
  // but it adds some memory overhead, so keep it off for long audio
  useWebAudioImplement: true,
});
// Playback function
const playAudio = url => {
  innerAudioContext.src = url;
  innerAudioContext.autoplay = true; // start playing
  // Other APIs:
  // innerAudioContext.pause() // pause
  // innerAudioContext.stop() // stop
};
Call the function to start playback.
On the recording page, you can pass res.tempFilePath in as the url:
const innerAudioContext = Taro.createInnerAudioContext()
const plugin = Taro.requirePlugin('WechatSI')
const manager = plugin.getRecordRecognitionManager()
manager.onStop = res => {
  console.log('result', res)
  setInputValue(res.result)
  console.log('.............. recording stopped')
  console.log('temp audio file path --> ' + res.tempFilePath)
  // Keep the temp file path so the clip can be played back later
  setAudioText(res.tempFilePath)
  console.log('total duration --> ' + res.duration + 'ms')
  console.log('file size --> ' + res.fileSize + 'B')
  console.log('recognized text --> ' + res.result)
}
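Putting the two snippets together, a minimal self-contained sketch of a page that records, recognizes, and plays back (all names here are assumptions, not part of the original component):

import Taro from '@tarojs/taro'
import { View, Button, Text } from '@tarojs/components'
import { useState } from 'react'

// Global singletons, as in the snippets above
const plugin = Taro.requirePlugin('WechatSI')
const manager = plugin.getRecordRecognitionManager()
const innerAudioContext = Taro.createInnerAudioContext()

export default function RecordPage() {
  const [inputValue, setInputValue] = useState('')
  const [audioText, setAudioText] = useState('') // temp file path of the last recording

  // Re-assigned on each render; setState functions are stable, so this is safe
  manager.onStop = res => {
    setInputValue(res.result) // recognized text
    setAudioText(res.tempFilePath) // keep the clip for playback
  }

  const playAudio = (url: string) => {
    innerAudioContext.src = url
    innerAudioContext.autoplay = true
  }

  return (
    <View>
      <Button
        onTouchStart={() => manager.start({ lang: 'zh_CN' })}
        onTouchEnd={() => manager.stop()}
      >
        按住说话
      </Button>
      <Button disabled={!audioText} onClick={() => playAudio(audioText)}>
        播放录音
      </Button>
      <Text>{inputValue}</Text>
    </View>
  )
}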