NeoPy committed on
Commit 7fca5cf · verified · 1 Parent(s): 44f862e

Update infer-web.py

Files changed (1)
  1. infer-web.py +5 -806
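The diff below strips infer-web.py down to a thin launcher: the roughly 800 lines of module setup and training/indexing helpers at the top of the file are removed in favour of a single star-import, while the Gradio layout further down stays in place. A rough sketch of the file after this commit, reconstructed from the added and kept lines of the diff (the unchanged layout is elided; config and i18n are assumed to be re-exported by the new original module):

import gradio as gr
from original import *  # assumed to provide config, i18n and the other globals the layout still uses

with gr.Blocks(title="RVC UI") as app:
    gr.Label("RVC UI")  # the diff adds gr.Lab3l(...); gr.Label is assumed to be the intended component
    # ... license notice, tabs and controls unchanged from the previous layout ...

    if config.iscolab:
        app.queue().launch(share=True, max_threads=511)
    else:
        app.queue().launch(
            max_threads=511,
            server_name="0.0.0.0",
            inbrowser=not config.noautoopen,
        )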
infer-web.py CHANGED
@@ -1,811 +1,10 @@
1
- import os
2
- import sys
3
- from dotenv import load_dotenv
4
-
5
- now_dir = os.getcwd()
6
- sys.path.append(now_dir)
7
- load_dotenv()
8
- from infer.modules.vc.modules import VC
9
- from infer.modules.uvr5.modules import uvr
10
- from infer.lib.train.process_ckpt import (
11
- change_info,
12
- extract_small_model,
13
- merge,
14
- show_info,
15
- )
16
- from i18n.i18n import I18nAuto
17
- from configs.config import Config
18
- from sklearn.cluster import MiniBatchKMeans
19
- import torch, platform
20
- import numpy as np
21
  import gradio as gr
22
- import faiss
23
- import fairseq
24
- import pathlib
25
- import json
26
- from time import sleep
27
- from subprocess import Popen
28
- from random import shuffle
29
- import warnings
30
- import traceback
31
- import threading
32
- import shutil
33
- import logging
34
-
35
-
36
- logging.getLogger("numba").setLevel(logging.WARNING)
37
- logging.getLogger("httpx").setLevel(logging.WARNING)
38
-
39
- logger = logging.getLogger(__name__)
40
-
41
- tmp = os.path.join(now_dir, "TEMP")
42
- shutil.rmtree(tmp, ignore_errors=True)
43
- shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
44
- shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
45
- os.makedirs(tmp, exist_ok=True)
46
- os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
47
- os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
48
- os.environ["TEMP"] = tmp
49
- warnings.filterwarnings("ignore")
50
- torch.manual_seed(114514)
51
-
52
-
53
- config = Config()
54
- vc = VC(config)
55
-
56
- if not config.nocheck:
57
- from infer.lib.rvcmd import check_all_assets, download_all_assets
58
-
59
- if not check_all_assets():
60
- download_all_assets(tmpdir=tmp)
61
- if not check_all_assets():
62
- logging.error("counld not satisfy all assets needed.")
63
- exit(1)
64
-
65
- if config.dml == True:
66
-
67
- def forward_dml(ctx, x, scale):
68
- ctx.scale = scale
69
- res = x.clone().detach()
70
- return res
71
-
72
- fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
73
- i18n = I18nAuto()
74
- logger.info(i18n)
75
- # 判断是否有能用来训练和加速推理的N卡
76
- ngpu = torch.cuda.device_count()
77
- gpu_infos = []
78
- mem = []
79
- if_gpu_ok = False
80
-
81
- if torch.cuda.is_available() or ngpu != 0:
82
- for i in range(ngpu):
83
- gpu_name = torch.cuda.get_device_name(i)
84
- if any(
85
- value in gpu_name.upper()
86
- for value in [
87
- "10",
88
- "16",
89
- "20",
90
- "30",
91
- "40",
92
- "A2",
93
- "A3",
94
- "A4",
95
- "P4",
96
- "A50",
97
- "500",
98
- "A60",
99
- "70",
100
- "80",
101
- "90",
102
- "M4",
103
- "T4",
104
- "TITAN",
105
- "4060",
106
- "L",
107
- "6000",
108
- ]
109
- ):
110
- # A10#A100#V100#A40#P40#M40#K80#A4500
111
- if_gpu_ok = True # 至少有一张能用的N卡
112
- gpu_infos.append("%s\t%s" % (i, gpu_name))
113
- mem.append(
114
- int(
115
- torch.cuda.get_device_properties(i).total_memory
116
- / 1024
117
- / 1024
118
- / 1024
119
- + 0.4
120
- )
121
- )
122
- if if_gpu_ok and len(gpu_infos) > 0:
123
- gpu_info = "\n".join(gpu_infos)
124
- default_batch_size = min(mem) // 2
125
- else:
126
- gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
127
- default_batch_size = 1
128
- gpus = "-".join([i[0] for i in gpu_infos])
129
-
130
-
131
- weight_root = os.getenv("weight_root")
132
- weight_uvr5_root = os.getenv("weight_uvr5_root")
133
- index_root = os.getenv("index_root")
134
- outside_index_root = os.getenv("outside_index_root")
135
-
136
- names = []
137
- for name in os.listdir(weight_root):
138
- if name.endswith(".pth"):
139
- names.append(name)
140
- index_paths = []
141
-
142
-
143
- def lookup_indices(index_root):
144
- global index_paths
145
- for root, dirs, files in os.walk(index_root, topdown=False):
146
- for name in files:
147
- if name.endswith(".index") and "trained" not in name:
148
- index_paths.append("%s/%s" % (root, name))
149
-
150
-
151
- lookup_indices(index_root)
152
- lookup_indices(outside_index_root)
153
- uvr5_names = []
154
- for name in os.listdir(weight_uvr5_root):
155
- if name.endswith(".pth") or "onnx" in name:
156
- uvr5_names.append(name.replace(".pth", ""))
157
-
158
-
159
- def change_choices():
160
- names = []
161
- for name in os.listdir(weight_root):
162
- if name.endswith(".pth"):
163
- names.append(name)
164
- index_paths = []
165
- for root, dirs, files in os.walk(index_root, topdown=False):
166
- for name in files:
167
- if name.endswith(".index") and "trained" not in name:
168
- index_paths.append("%s/%s" % (root, name))
169
- return {"choices": sorted(names), "__type__": "update"}, {
170
- "choices": sorted(index_paths),
171
- "__type__": "update",
172
- }
173
-
174
-
175
- def clean():
176
- return {"value": "", "__type__": "update"}
177
-
178
-
179
- def export_onnx(ModelPath, ExportedPath):
180
- from infer.modules.onnx.export import export_onnx as eo
181
-
182
- eo(ModelPath, ExportedPath)
183
-
184
-
185
- sr_dict = {
186
- "32k": 32000,
187
- "40k": 40000,
188
- "48k": 48000,
189
- }
190
-
191
-
192
- def if_done(done, p):
193
- while 1:
194
- if p.poll() is None:
195
- sleep(0.5)
196
- else:
197
- break
198
- done[0] = True
199
-
200
-
201
- def if_done_multi(done, ps):
202
- while 1:
203
- # poll==None代表进程未结束
204
- # 只要有一个进程未结束都不停
205
- flag = 1
206
- for p in ps:
207
- if p.poll() is None:
208
- flag = 0
209
- sleep(0.5)
210
- break
211
- if flag == 1:
212
- break
213
- done[0] = True
214
-
215
-
216
- def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
217
- sr = sr_dict[sr]
218
- os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
219
- f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
220
- f.close()
221
- cmd = '"%s" infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' % (
222
- config.python_cmd,
223
- trainset_dir,
224
- sr,
225
- n_p,
226
- now_dir,
227
- exp_dir,
228
- config.noparallel,
229
- config.preprocess_per,
230
- )
231
- logger.info("Execute: " + cmd)
232
- # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
233
- p = Popen(cmd, shell=True)
234
- # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
235
- done = [False]
236
- threading.Thread(
237
- target=if_done,
238
- args=(
239
- done,
240
- p,
241
- ),
242
- ).start()
243
- while 1:
244
- with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
245
- yield (f.read())
246
- sleep(1)
247
- if done[0]:
248
- break
249
- with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
250
- log = f.read()
251
- logger.info(log)
252
- yield log
253
-
254
-
255
- # but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
256
- def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvpe):
257
- gpus = gpus.split("-")
258
- os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
259
- f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
260
- f.close()
261
- if if_f0:
262
- if f0method != "rmvpe_gpu":
263
- cmd = (
264
- '"%s" infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s'
265
- % (
266
- config.python_cmd,
267
- now_dir,
268
- exp_dir,
269
- n_p,
270
- f0method,
271
- )
272
- )
273
- logger.info("Execute: " + cmd)
274
- p = Popen(
275
- cmd, shell=True, cwd=now_dir
276
- ) # , stdin=PIPE, stdout=PIPE,stderr=PIPE
277
- # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
278
- done = [False]
279
- threading.Thread(
280
- target=if_done,
281
- args=(
282
- done,
283
- p,
284
- ),
285
- ).start()
286
- else:
287
- if gpus_rmvpe != "-":
288
- gpus_rmvpe = gpus_rmvpe.split("-")
289
- leng = len(gpus_rmvpe)
290
- ps = []
291
- for idx, n_g in enumerate(gpus_rmvpe):
292
- cmd = (
293
- '"%s" infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s '
294
- % (
295
- config.python_cmd,
296
- leng,
297
- idx,
298
- n_g,
299
- now_dir,
300
- exp_dir,
301
- config.is_half,
302
- )
303
- )
304
- logger.info("Execute: " + cmd)
305
- p = Popen(
306
- cmd, shell=True, cwd=now_dir
307
- ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
308
- ps.append(p)
309
- # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
310
- done = [False]
311
- threading.Thread(
312
- target=if_done_multi, #
313
- args=(
314
- done,
315
- ps,
316
- ),
317
- ).start()
318
- else:
319
- cmd = (
320
- config.python_cmd
321
- + ' infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" '
322
- % (
323
- now_dir,
324
- exp_dir,
325
- )
326
- )
327
- logger.info("Execute: " + cmd)
328
- p = Popen(
329
- cmd, shell=True, cwd=now_dir
330
- ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
331
- p.wait()
332
- done = [True]
333
- while 1:
334
- with open(
335
- "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
336
- ) as f:
337
- yield (f.read())
338
- sleep(1)
339
- if done[0]:
340
- break
341
- with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
342
- log = f.read()
343
- logger.info(log)
344
- yield log
345
- # 对不同part分别开多进程
346
- """
347
- n_part=int(sys.argv[1])
348
- i_part=int(sys.argv[2])
349
- i_gpu=sys.argv[3]
350
- exp_dir=sys.argv[4]
351
- os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
352
- """
353
- leng = len(gpus)
354
- ps = []
355
- for idx, n_g in enumerate(gpus):
356
- cmd = (
357
- '"%s" infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s %s'
358
- % (
359
- config.python_cmd,
360
- config.device,
361
- leng,
362
- idx,
363
- n_g,
364
- now_dir,
365
- exp_dir,
366
- version19,
367
- config.is_half,
368
- )
369
- )
370
- logger.info("Execute: " + cmd)
371
- p = Popen(
372
- cmd, shell=True, cwd=now_dir
373
- ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
374
- ps.append(p)
375
- # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读
376
- done = [False]
377
- threading.Thread(
378
- target=if_done_multi,
379
- args=(
380
- done,
381
- ps,
382
- ),
383
- ).start()
384
- while 1:
385
- with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
386
- yield (f.read())
387
- sleep(1)
388
- if done[0]:
389
- break
390
- with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
391
- log = f.read()
392
- logger.info(log)
393
- yield log
394
-
395
-
396
- def get_pretrained_models(path_str, f0_str, sr2):
397
- if_pretrained_generator_exist = os.access(
398
- "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
399
- )
400
- if_pretrained_discriminator_exist = os.access(
401
- "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
402
- )
403
- if not if_pretrained_generator_exist:
404
- logger.warning(
405
- "assets/pretrained%s/%sG%s.pth not exist, will not use pretrained model",
406
- path_str,
407
- f0_str,
408
- sr2,
409
- )
410
- if not if_pretrained_discriminator_exist:
411
- logger.warning(
412
- "assets/pretrained%s/%sD%s.pth not exist, will not use pretrained model",
413
- path_str,
414
- f0_str,
415
- sr2,
416
- )
417
- return (
418
- (
419
- "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
420
- if if_pretrained_generator_exist
421
- else ""
422
- ),
423
- (
424
- "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
425
- if if_pretrained_discriminator_exist
426
- else ""
427
- ),
428
- )
429
-
430
 
431
- def change_sr2(sr2, if_f0_3, version19):
432
- path_str = "" if version19 == "v1" else "_v2"
433
- f0_str = "f0" if if_f0_3 else ""
434
- return get_pretrained_models(path_str, f0_str, sr2)
435
-
436
-
437
- def change_version19(sr2, if_f0_3, version19):
438
- path_str = "" if version19 == "v1" else "_v2"
439
- if sr2 == "32k" and version19 == "v1":
440
- sr2 = "40k"
441
- to_return_sr2 = (
442
- {"choices": ["40k", "48k"], "__type__": "update", "value": sr2}
443
- if version19 == "v1"
444
- else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2}
445
- )
446
- f0_str = "f0" if if_f0_3 else ""
447
- return (
448
- *get_pretrained_models(path_str, f0_str, sr2),
449
- to_return_sr2,
450
- )
451
-
452
-
453
- def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15
454
- path_str = "" if version19 == "v1" else "_v2"
455
- return (
456
- {"visible": if_f0_3, "__type__": "update"},
457
- {"visible": if_f0_3, "__type__": "update"},
458
- *get_pretrained_models(path_str, "f0" if if_f0_3 == True else "", sr2),
459
- )
460
-
461
-
462
- # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
463
- def click_train(
464
- exp_dir1,
465
- sr2,
466
- if_f0_3,
467
- spk_id5,
468
- save_epoch10,
469
- total_epoch11,
470
- batch_size12,
471
- if_save_latest13,
472
- pretrained_G14,
473
- pretrained_D15,
474
- gpus16,
475
- if_cache_gpu17,
476
- if_save_every_weights18,
477
- version19,
478
- ):
479
- # 生成filelist
480
- exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
481
- os.makedirs(exp_dir, exist_ok=True)
482
- gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
483
- feature_dir = (
484
- "%s/3_feature256" % (exp_dir)
485
- if version19 == "v1"
486
- else "%s/3_feature768" % (exp_dir)
487
- )
488
- if if_f0_3:
489
- f0_dir = "%s/2a_f0" % (exp_dir)
490
- f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
491
- names = (
492
- set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
493
- & set([name.split(".")[0] for name in os.listdir(feature_dir)])
494
- & set([name.split(".")[0] for name in os.listdir(f0_dir)])
495
- & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
496
- )
497
- else:
498
- names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
499
- [name.split(".")[0] for name in os.listdir(feature_dir)]
500
- )
501
- opt = []
502
- for name in names:
503
- if if_f0_3:
504
- opt.append(
505
- "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
506
- % (
507
- gt_wavs_dir.replace("\\", "\\\\"),
508
- name,
509
- feature_dir.replace("\\", "\\\\"),
510
- name,
511
- f0_dir.replace("\\", "\\\\"),
512
- name,
513
- f0nsf_dir.replace("\\", "\\\\"),
514
- name,
515
- spk_id5,
516
- )
517
- )
518
- else:
519
- opt.append(
520
- "%s/%s.wav|%s/%s.npy|%s"
521
- % (
522
- gt_wavs_dir.replace("\\", "\\\\"),
523
- name,
524
- feature_dir.replace("\\", "\\\\"),
525
- name,
526
- spk_id5,
527
- )
528
- )
529
- fea_dim = 256 if version19 == "v1" else 768
530
- if if_f0_3:
531
- for _ in range(2):
532
- opt.append(
533
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
534
- % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
535
- )
536
- else:
537
- for _ in range(2):
538
- opt.append(
539
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
540
- % (now_dir, sr2, now_dir, fea_dim, spk_id5)
541
- )
542
- shuffle(opt)
543
- with open("%s/filelist.txt" % exp_dir, "w") as f:
544
- f.write("\n".join(opt))
545
- logger.debug("Write filelist done")
546
- # 生成config#无需生成config
547
- # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
548
- logger.info("Use gpus: %s", str(gpus16))
549
- if pretrained_G14 == "":
550
- logger.info("No pretrained Generator")
551
- if pretrained_D15 == "":
552
- logger.info("No pretrained Discriminator")
553
- if version19 == "v1" or sr2 == "40k":
554
- config_path = "v1/%s.json" % sr2
555
- else:
556
- config_path = "v2/%s.json" % sr2
557
- config_save_path = os.path.join(exp_dir, "config.json")
558
- if not pathlib.Path(config_save_path).exists():
559
- with open(config_save_path, "w", encoding="utf-8") as f:
560
- json.dump(
561
- config.json_config[config_path],
562
- f,
563
- ensure_ascii=False,
564
- indent=4,
565
- sort_keys=True,
566
- )
567
- f.write("\n")
568
- if gpus16:
569
- cmd = (
570
- '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'
571
- % (
572
- config.python_cmd,
573
- exp_dir1,
574
- sr2,
575
- 1 if if_f0_3 else 0,
576
- batch_size12,
577
- gpus16,
578
- total_epoch11,
579
- save_epoch10,
580
- "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "",
581
- "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "",
582
- 1 if if_save_latest13 == i18n("是") else 0,
583
- 1 if if_cache_gpu17 == i18n("是") else 0,
584
- 1 if if_save_every_weights18 == i18n("是") else 0,
585
- version19,
586
- )
587
- )
588
- else:
589
- cmd = (
590
- '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'
591
- % (
592
- config.python_cmd,
593
- exp_dir1,
594
- sr2,
595
- 1 if if_f0_3 else 0,
596
- batch_size12,
597
- total_epoch11,
598
- save_epoch10,
599
- "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "",
600
- "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "",
601
- 1 if if_save_latest13 == i18n("是") else 0,
602
- 1 if if_cache_gpu17 == i18n("是") else 0,
603
- 1 if if_save_every_weights18 == i18n("是") else 0,
604
- version19,
605
- )
606
- )
607
- logger.info("Execute: " + cmd)
608
- p = Popen(cmd, shell=True, cwd=now_dir)
609
- p.wait()
610
- return "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"
611
-
612
-
613
- # but4.click(train_index, [exp_dir1], info3)
614
- def train_index(exp_dir1, version19):
615
- # exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
616
- exp_dir = "logs/%s" % (exp_dir1)
617
- os.makedirs(exp_dir, exist_ok=True)
618
- feature_dir = (
619
- "%s/3_feature256" % (exp_dir)
620
- if version19 == "v1"
621
- else "%s/3_feature768" % (exp_dir)
622
- )
623
- if not os.path.exists(feature_dir):
624
- return "请先进行特征提取!"
625
- listdir_res = list(os.listdir(feature_dir))
626
- if len(listdir_res) == 0:
627
- return "请先进行特征提取!"
628
- infos = []
629
- npys = []
630
- for name in sorted(listdir_res):
631
- phone = np.load("%s/%s" % (feature_dir, name))
632
- npys.append(phone)
633
- big_npy = np.concatenate(npys, 0)
634
- big_npy_idx = np.arange(big_npy.shape[0])
635
- np.random.shuffle(big_npy_idx)
636
- big_npy = big_npy[big_npy_idx]
637
- if big_npy.shape[0] > 2e5:
638
- infos.append("Trying doing kmeans %s shape to 10k centers." % big_npy.shape[0])
639
- yield "\n".join(infos)
640
- try:
641
- big_npy = (
642
- MiniBatchKMeans(
643
- n_clusters=10000,
644
- verbose=True,
645
- batch_size=256 * config.n_cpu,
646
- compute_labels=False,
647
- init="random",
648
- )
649
- .fit(big_npy)
650
- .cluster_centers_
651
- )
652
- except:
653
- info = traceback.format_exc()
654
- logger.info(info)
655
- infos.append(info)
656
- yield "\n".join(infos)
657
-
658
- np.save("%s/total_fea.npy" % exp_dir, big_npy)
659
- n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
660
- infos.append("%s,%s" % (big_npy.shape, n_ivf))
661
- yield "\n".join(infos)
662
- index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
663
- # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf)
664
- infos.append("training")
665
- yield "\n".join(infos)
666
- index_ivf = faiss.extract_index_ivf(index) #
667
- index_ivf.nprobe = 1
668
- index.train(big_npy)
669
- faiss.write_index(
670
- index,
671
- "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
672
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
673
- )
674
- infos.append("adding")
675
- yield "\n".join(infos)
676
- batch_size_add = 8192
677
- for i in range(0, big_npy.shape[0], batch_size_add):
678
- index.add(big_npy[i : i + batch_size_add])
679
- faiss.write_index(
680
- index,
681
- "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
682
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
683
- )
684
- infos.append(
685
- "成功构建索引 added_IVF%s_Flat_nprobe_%s_%s_%s.index"
686
- % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
687
- )
688
- try:
689
- link = os.link if platform.system() == "Windows" else os.symlink
690
- link(
691
- "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
692
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
693
- "%s/%s_IVF%s_Flat_nprobe_%s_%s_%s.index"
694
- % (
695
- outside_index_root,
696
- exp_dir1,
697
- n_ivf,
698
- index_ivf.nprobe,
699
- exp_dir1,
700
- version19,
701
- ),
702
- )
703
- infos.append("链接索引到外部-%s" % (outside_index_root))
704
- except:
705
- infos.append("链接索引到外部-%s失败" % (outside_index_root))
706
-
707
- # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
708
- # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
709
- yield "\n".join(infos)
710
-
711
-
712
- # but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
713
- def train1key(
714
- exp_dir1,
715
- sr2,
716
- if_f0_3,
717
- trainset_dir4,
718
- spk_id5,
719
- np7,
720
- f0method8,
721
- save_epoch10,
722
- total_epoch11,
723
- batch_size12,
724
- if_save_latest13,
725
- pretrained_G14,
726
- pretrained_D15,
727
- gpus16,
728
- if_cache_gpu17,
729
- if_save_every_weights18,
730
- version19,
731
- gpus_rmvpe,
732
- ):
733
- infos = []
734
-
735
- def get_info_str(strr):
736
- infos.append(strr)
737
- return "\n".join(infos)
738
-
739
- # step1:处理数据
740
- yield get_info_str(i18n("step1:正在处理数据"))
741
- [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)]
742
-
743
- # step2a:提取音高
744
- yield get_info_str(i18n("step2:正在提取音高&正在提取特征"))
745
- [
746
- get_info_str(_)
747
- for _ in extract_f0_feature(
748
- gpus16, np7, f0method8, if_f0_3, exp_dir1, version19, gpus_rmvpe
749
- )
750
- ]
751
-
752
- # step3a:训练模型
753
- yield get_info_str(i18n("step3a:正在训练模型"))
754
- click_train(
755
- exp_dir1,
756
- sr2,
757
- if_f0_3,
758
- spk_id5,
759
- save_epoch10,
760
- total_epoch11,
761
- batch_size12,
762
- if_save_latest13,
763
- pretrained_G14,
764
- pretrained_D15,
765
- gpus16,
766
- if_cache_gpu17,
767
- if_save_every_weights18,
768
- version19,
769
- )
770
- yield get_info_str(
771
- i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log")
772
- )
773
-
774
- # step3b:训练索引
775
- [get_info_str(_) for _ in train_index(exp_dir1, version19)]
776
- yield get_info_str(i18n("全流程结束!"))
777
-
778
-
779
- # ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
780
- def change_info_(ckpt_path):
781
- if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")):
782
- return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
783
- try:
784
- with open(
785
- ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r"
786
- ) as f:
787
- info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
788
- sr, f0 = info["sample_rate"], info["if_f0"]
789
- version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
790
- return sr, str(f0), version
791
- except:
792
- traceback.print_exc()
793
- return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
794
-
795
-
796
- F0GPUVisible = config.dml == False
797
-
798
-
799
- def change_f0_method(f0method8):
800
- if f0method8 == "rmvpe_gpu":
801
- visible = F0GPUVisible
802
- else:
803
- visible = False
804
- return {"visible": visible, "__type__": "update"}
805
 
806
 
807
- with gr.Blocks(title="RVC WebUI") as app:
808
- gr.Markdown("## RVC WebUI")
809
  gr.Markdown(
810
  value=i18n(
811
  "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>."
@@ -1607,9 +806,9 @@ with gr.Blocks(title="RVC WebUI") as app:
1607
  gr.Markdown(traceback.format_exc())
1608
 
1609
  if config.iscolab:
1610
- app.queue(max_size=1022).launch(share=True, max_threads=511)
1611
  else:
1612
- app.queue(max_size=1022).launch(
1613
  max_threads=511,
1614
  server_name="0.0.0.0",
1615
  inbrowser=not config.noautoopen,

1
  import gradio as gr
2
+ from original import *
3

4

5

6
+ with gr.Blocks(title="RVC UI") as app:
7
+ gr.Lab3l("RVC UI")
8
  gr.Markdown(
9
  value=i18n(
10
  "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>."
 
806
  gr.Markdown(traceback.format_exc())
807
 
808
  if config.iscolab:
809
+ app.queue().launch(share=True, max_threads=511)
810
  else:
811
+ app.queue().launch(
812
  max_threads=511,
813
  server_name="0.0.0.0",
814
  inbrowser=not config.noautoopen,
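Besides the slimmed-down header, the only functional change in this hunk is dropping the max_size cap from the queue. A before/after sketch, assuming current Gradio semantics where omitting max_size leaves the pending-request queue unbounded:

# before: at most 1022 requests may wait in the queue; further requests are turned away
app.queue(max_size=1022).launch(share=True, max_threads=511)

# after: no cap, so any number of requests may wait in the queue
app.queue().launch(share=True, max_threads=511)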