Upload 7 files

- app.py +37 -31
- configs/config.json +90 -0
- inference_main.py +2 -2
- logs/48k/D_1000000.pth +3 -0
- logs/48k/D_1M111000_sing.pth +3 -0
- logs/48k/G_1000000.pth +3 -0
- logs/48k/G_1M111000_sing.pth +3 -0
app.py
CHANGED

````diff
@@ -9,16 +9,13 @@ from inference.infer_tool import Svc
 import logging
 logging.getLogger('numba').setLevel(logging.WARNING)
 
-model_name = "logs/48k/
-config_name = "configs/
+model_name = "logs/48k/G_1M111000_sing.pth"
+config_name = "configs/config.json"
 
 svc_model = Svc(model_name, config_name)
-
-    "岁己(本音)": "suiji"
-}
-def vc_fn(sid, input_audio, vc_transform):
+def vc_fn(input_audio, vc_transform):
     if input_audio is None:
-        return
+        return None
     sampling_rate, audio = input_audio
     # print(audio.shape,sampling_rate)
     duration = audio.shape[0] / sampling_rate
@@ -31,10 +28,9 @@ def vc_fn(sid, input_audio, vc_transform):
     out_wav_path = io.BytesIO()
     soundfile.write(out_wav_path, audio, 16000, format="wav")
     out_wav_path.seek(0)
-
-    out_audio, out_sr = svc_model.infer(sid, vc_transform, out_wav_path)
+    out_audio, out_sr = svc_model.infer("suiji", vc_transform, out_wav_path)
     _audio = out_audio.cpu().numpy()
-    return
+    return (48000, _audio)
 
 app = gr.Blocks()
 with app:
@@ -42,38 +38,48 @@ with app:
         with gr.TabItem("歌声音色转换"):
             gr.Markdown(value="""
 # 强烈建议☝️先看一遍使用说明
-
-## 这是 sovits 3.0 48kHz AI
-
-
-
-
-
-
-
-
+
+## 这是 sovits 3.0 48kHz AI岁己歌声音色转换的在线demo
+
+### 目前模型训练状态:1000000steps底模 + 111000steps
+
+### 推理出来有概率会给吸气音上电,需要后期小修一下,大概可能也许是因为炼太久糊了
+
+### 仓库内模型所用于训练的数据:
+
+| G_1000000.pth | G_1M111000_sing.pth(现任) | G_1M100000_sing.pth(待产) | G_1M100000_sing1.pth(待产) |
+| :----: | :----: | :----: | :----: |
+| 12月录播(除电台)、出道至今22条歌投、10条歌切、圣诞音声(27.5小时) | G_1000000.pth作为底模_2022年所有唱歌投稿、唱歌切片、圣诞音声(3.9小时) | G_1000000.pth作为底模_(使用效果更好的UVR5模型去除BGM)出道至今所有唱歌投稿、唱歌切片、圣诞音声 | 先用1月录播(除电台)训练一个底模,再用出道至今所有唱歌投稿、唱歌切片、圣诞音声进行训练 |
+
+### 仓库内G.pth、D.pth都有,欢迎作为底模用于进一步训练,如果要训练自己的数据请访问:[项目Github仓库](https://github.com/innnky/so-vits-svc/tree/main)、[教程《svc相关》](https://www.yuque.com/jiuwei-nui3d/qng6eg)
+
+### 建议参考上方“使用说明”下的教程,在本地使用 inference_main.py 处理,我都写成这样了再小白应该都能搞定(不怕麻烦的话)
+
+### 本地推理可调用GPU(NVIDIA),3060Ti 8G可推理一条20(建议) - 30s的音频,过长音频可分割后批量处理,就算用CPU推理也比 Hugging Face 快不少
+
+### 有空可能会折腾一下导出onnx,抛弃这堆较为臃肿的Python依赖
             """)
-            sid = gr.Dropdown(label="音色", choices=["岁己(本音)"], value="岁己(本音)")
             vc_input3 = gr.Audio(label="输入音频(长度请控制在30s左右,过长可能会爆内存)")
             vc_transform = gr.Number(label="变调(整数,可以正负,半音数量,升高八度就是12)", value=0)
             vc_submit = gr.Button("转换", variant="primary")
-            vc_output1 = gr.Textbox(label="输出日志")
             vc_output2 = gr.Audio(label="输出音频(最右侧三个点可以下载)")
-            vc_submit.click(vc_fn, [
+            vc_submit.click(vc_fn, [vc_input3, vc_transform], [vc_output2])
         with gr.TabItem("亿点点使用说明➕保姆级本地部署教程"):
             gr.Markdown(value="""
 # 强烈建议👇先看一遍使用说明
-
+
 ### 输入的音频一定要是纯净的干音,不要把歌曲直接扔进来
-
-###
+
+### 混响和和声也不能有,UVR分离出人声之后需要注意一下
+
 ### 对陈述语气没多大作用,实在没干音库的话,你可以自己唱然后升十几个调慢慢试效果
-
-###
+
+### 数据集几乎全是杂谈的G_1000000.pth:长音不稳,音域不宽,选曲限制较大,可以多试试变调,没什么必要去用
+
+### 现任的G_1M111000_sing.pth:有概率会给吸气音上电,需要后期小修一下
 
 # 在本地部署并使用 inference_main.py 处理的保姆级教程:
-
+
 ### 0. 创建一个存放文件的目录,例如 D:\\SUI\\
 
 ### 1. 安装所需的软件
@@ -81,7 +87,7 @@ with app:
 1. [miniconda-Python3.8](https://docs.conda.io/en/latest/miniconda.html#windows-installers)(未测试其他Python版本)[点这里可以直接下载](https://repo.anaconda.com/miniconda/Miniconda3-py38_22.11.1-1-Windows-x86_64.exe),Just Me 与 All Users 都行,其余可无脑下一步
 
 2. [git](https://git-scm.com/download/win)(建议使用便携版)[点这里可以直接下载(便携版v2.39.0.2)](https://github.com/git-for-windows/git/releases/download/v2.39.0.windows.2/PortableGit-2.39.0.2-64-bit.7z.exe),路径填 D:\\SUI\\git\\
-
+
 ### 2. 在开始菜单中运行 Anaconda Powershell Prompt 并配置环境(除了工作目录,复制粘贴回车即可)
 
 ```
````
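The updated `vc_fn` drops the old speaker dropdown, hard-codes the `suiji` speaker, and returns a `(48000, audio)` tuple that feeds `vc_output2` directly. The same inference path can be exercised outside Gradio roughly as sketched below; this is only a sketch, `raw/test_clip.wav` is a hypothetical 16 kHz mono dry-vocal clip, and the `Svc` calls mirror the diff above.

```python
# Sketch: run the Space's inference path without the Gradio UI.
# Assumes the repo layout from this commit; the input clip is hypothetical and is
# already a 16 kHz mono dry vocal (the real vc_fn resamples the upload before this step).
import io

import soundfile
from inference.infer_tool import Svc

svc_model = Svc("logs/48k/G_1M111000_sing.pth", "configs/config.json")

audio, sr = soundfile.read("raw/test_clip.wav")
wav_buf = io.BytesIO()
soundfile.write(wav_buf, audio, 16000, format="wav")  # same in-memory 16 kHz wav that vc_fn builds
wav_buf.seek(0)

# 0 = no transposition; positive/negative integers shift by that many semitones, as in the UI
out_audio, out_sr = svc_model.infer("suiji", 0, wav_buf)
soundfile.write("test_clip_suiji.wav", out_audio.cpu().numpy(), 48000)  # model outputs 48 kHz audio
```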
configs/config.json
ADDED

```diff
@@ -0,0 +1,90 @@
+{
+  "train": {
+    "log_interval": 200,
+    "eval_interval": 1000,
+    "seed": 1234,
+    "epochs": 10000,
+    "learning_rate": 0.0002,
+    "betas": [
+      0.8,
+      0.99
+    ],
+    "eps": 1e-09,
+    "batch_size": 12,
+    "fp16_run": false,
+    "lr_decay": 0.999875,
+    "segment_size": 17920,
+    "init_lr_ratio": 1,
+    "warmup_epochs": 0,
+    "c_mel": 45,
+    "c_kl": 1.0,
+    "use_sr": true,
+    "max_speclen": 384,
+    "port": "8001"
+  },
+  "data": {
+    "training_files": "filelists/train.txt",
+    "validation_files": "filelists/val.txt",
+    "max_wav_value": 32768.0,
+    "sampling_rate": 48000,
+    "filter_length": 1280,
+    "hop_length": 320,
+    "win_length": 1280,
+    "n_mel_channels": 80,
+    "mel_fmin": 0.0,
+    "mel_fmax": null
+  },
+  "model": {
+    "inter_channels": 192,
+    "hidden_channels": 192,
+    "filter_channels": 768,
+    "n_heads": 2,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0.1,
+    "resblock": "1",
+    "resblock_kernel_sizes": [
+      3,
+      7,
+      11
+    ],
+    "resblock_dilation_sizes": [
+      [
+        1,
+        3,
+        5
+      ],
+      [
+        1,
+        3,
+        5
+      ],
+      [
+        1,
+        3,
+        5
+      ]
+    ],
+    "upsample_rates": [
+      10,
+      8,
+      2,
+      2
+    ],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [
+      16,
+      16,
+      4,
+      4
+    ],
+    "n_layers_q": 3,
+    "use_spectral_norm": false,
+    "gin_channels": 256,
+    "ssl_dim": 256,
+    "n_speakers": 2
+  },
+  "spk": {
+    "suiji": 0
+  }
+}
```
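The added config pins the 48 kHz data settings and a single-entry speaker map, which is what lets `app.py` and `inference_main.py` load the checkpoint with just these two paths. A small sketch of reading back the fields that matter for inference (the comments only restate values already in the file):

```python
# Sketch: sanity-check the committed config from Python.
import json

with open("configs/config.json") as f:
    cfg = json.load(f)

sr = cfg["data"]["sampling_rate"]    # 48000 -> this is the 48 kHz model variant
hop = cfg["data"]["hop_length"]      # 320 samples per frame
print(f"feature frame rate: {sr / hop:.0f} Hz")                        # 150 Hz
print(f"training segment: {cfg['train']['segment_size'] / sr:.3f} s")  # 17920 / 48000 ≈ 0.373 s
print("speaker map:", cfg["spk"])    # {'suiji': 0}, matching the hard-coded speaker in app.py
```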
inference_main.py
CHANGED

```diff
@@ -14,8 +14,8 @@ from inference.infer_tool import Svc
 logging.getLogger('numba').setLevel(logging.WARNING)
 chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
 
-model_path = "logs/48k/
-config_path = "configs/
+model_path = "logs/48k/G_1M111000_sing.pth"
+config_path = "configs/config.json"
 svc_model = Svc(model_path, config_path)
 infer_tool.mkdir(["raw", "results"])
 
```
logs/48k/D_1000000.pth
ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dea84cb6648d8c504208edf9b3bb68680c33028c8b84a51c1917a43e29ffefea
+size 561098185
```

logs/48k/D_1M111000_sing.pth
ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81612e7166273210ec50849aafc1ef545522fb857961d4a50eac508ebe99942e
+size 561098185
```

logs/48k/G_1000000.pth
ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02030aa2e729f0af2227fd31f0499bc9769a7991b056d66ec1a9b009b5e37c50
+size 699505437
```

logs/48k/G_1M111000_sing.pth
ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67d8584f45d2662b652ac1555f49c52a7b569c67ccb470c169001bc96ab3b048
+size 699505437
```
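The four `.pth` entries are Git LFS pointer files, so a clone made without `git lfs` leaves three-line stubs in `logs/48k/` instead of the roughly 560–700 MB checkpoints. A small verification sketch (hypothetical helper; the sizes and sha256 oids are copied from the pointers above, covering only the current-generation pair):

```python
# Sketch: verify the LFS-tracked checkpoints were fetched rather than left as pointer stubs.
import hashlib
from pathlib import Path

EXPECTED = {
    "logs/48k/G_1M111000_sing.pth": ("67d8584f45d2662b652ac1555f49c52a7b569c67ccb470c169001bc96ab3b048", 699505437),
    "logs/48k/D_1M111000_sing.pth": ("81612e7166273210ec50849aafc1ef545522fb857961d4a50eac508ebe99942e", 561098185),
}

for name, (oid, size) in EXPECTED.items():
    path = Path(name)
    if path.stat().st_size != size:
        print(f"{name}: size mismatch -> probably still an LFS pointer (run `git lfs pull`)")
        continue
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    print(f"{name}: {'ok' if digest.hexdigest() == oid else 'sha256 mismatch'}")
```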